author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2025-02-26 03:03:25 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2025-02-26 03:03:25 +0300
commit | 0b119045b79a672bc6d8f18641c60fc8ce1b4585 (patch)
tree | 69c63ecfec55b9576c34dc742e0c38f46f8a317a /drivers/net/ethernet
parent | 7f7573bd4f37d4edc168c5b5def0bc2a1951c657 (diff)
parent | d082ecbc71e9e0bf49883ee4afd435a77a5101b6 (diff)
download | linux-0b119045b79a672bc6d8f18641c60fc8ce1b4585.tar.xz
Merge tag 'v6.14-rc4' into next
Sync up with the mainline.
Diffstat (limited to 'drivers/net/ethernet')
427 files changed, 20550 insertions(+), 6778 deletions(-)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 9a542e3c9b05..977b42bc1e8c 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -159,7 +159,7 @@ config ETHOC Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. config OA_TC6 - tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" + tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST depends on SPI select PHYLIB help diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 63c8a2328142..c1295dfad0d0 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -74,7 +74,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) if (threshold < time_since_last_napi && napi_scheduled) { netdev_err(dev, "napi handler hasn't been called for a long time but is scheduled\n"); - reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION; + reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION; } schedule_reset: /* Change the state of the device to trigger reset diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 72db9f9e7bee..c6bd803f5b0c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev) val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a->write_csr(ioaddr, CSR3, val); - napi_enable(&lp->napi); + napi_enable_locked(&lp->napi); } /* @@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, if (netif_running(dev)) pcnet32_netif_stop(dev); + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, } spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size); @@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) if (netif_running(dev)) pcnet32_netif_stop(dev); + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -1122,6 +1125,7 @@ clean_up: lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ } spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); return rc; } /* end pcnet32_loopback_test */ @@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev) return -EAGAIN; } + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); /* Check for a valid station address */ if (!is_valid_ether_addr(dev->dev_addr)) { @@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } - napi_enable(&lp->napi); + napi_enable_locked(&lp->napi); /* Re-initialize the PCNET32, and start it when done. 
*/ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); @@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev) lp->a->read_csr(ioaddr, CSR0)); spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); return 0; /* Always succeed */ @@ -2315,6 +2321,7 @@ err_free_ring: err_free_irq: spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); free_irq(dev->irq, dev); return rc; } diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index 2681889162a2..44971e71991f 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, if (err && err != -EIO) return err; - listlen = fw_list.num_fw_slots; + listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names)); for (i = 0; i < listlen; i++) { if (i < ARRAY_SIZE(fw_slotnames)) strscpy(buf, fw_slotnames[i], sizeof(buf)); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index b0a6c96b6ef4..b35808d3d07f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -505,21 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) { - char *buf; - - if (!pdata->xgbe_debugfs) - return; - - buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); - if (!buf) - return; - - if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf)) - goto out; - - debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs, - pdata->xgbe_debugfs->d_parent, buf); - -out: - kfree(buf); + debugfs_change_name(pdata->xgbe_debugfs, + "amd-xgbe-%s", pdata->netdev->name); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 6a716337f48b..268399dfcf22 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; @@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - supported); - linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - supported); - - linkmode_copy(phy_data->phydev->supported, supported); + linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); phy_support_asym_pause(phy_data->phydev); @@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phydev->phy_id; @@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), 
- supported); - linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - supported); - linkmode_copy(phy_data->phydev->supported, supported); + linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index e641dbbea1e2..b854b6b42d77 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) if (dev->of_node) { struct clk *parent = clk_get_parent(pdata->clk); + long rate = rgmii_clock(pdata->phy_speed); - switch (pdata->phy_speed) { - case SPEED_10: - clk_set_rate(parent, 2500000); - break; - case SPEED_100: - clk_set_rate(parent, 25000000); - break; - default: - clk_set_rate(parent, 125000000); - break; - } + if (rate < 0) + rate = 125000000; + + clk_set_rate(parent, rate); } #ifdef CONFIG_ACPI else { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index fe0e3e2a8117..71e50fc65c14 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down) aq_ptp_ring_free(self); aq_ptp_free(self); - if (likely(self->aq_fw_ops->deinit) && link_down) { + /* May be invoked during hot unplug. */ + if (pci_device_is_present(self->pdev) && + likely(self->aq_fw_ops->deinit) && link_down) { mutex_lock(&self->fwreq_mutex); self->aq_fw_ops->deinit(self->aq_hw); mutex_unlock(&self->fwreq_mutex); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h index f93cb3da44b0..8fc75bcedb70 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -348,8 +348,6 @@ struct bcmasp_intf { /* Used if per intf wol irq */ int wol_irq; unsigned int wol_irq_enabled:1; - - struct ethtool_keee eee; }; #define NUM_NET_FILTERS 32 @@ -601,5 +599,4 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, void bcmasp_netfilt_suspend(struct bcmasp_intf *intf); -void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable); #endif diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index 9da5ae29a105..a537c121d3e2 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -348,58 +348,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return err; } -void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable) -{ - u32 reg; - - reg = umac_rl(intf, UMC_EEE_CTRL); - if (enable) - reg |= EEE_EN; - else - reg &= ~EEE_EN; - umac_wl(intf, reg, UMC_EEE_CTRL); - - intf->eee.eee_enabled = enable; -} - static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e) { - struct bcmasp_intf *intf = netdev_priv(dev); - struct ethtool_keee *p = &intf->eee; - if (!dev->phydev) return -ENODEV; - e->tx_lpi_enabled = p->tx_lpi_enabled; - e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); - return phy_ethtool_get_eee(dev->phydev, e); } static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e) { - struct bcmasp_intf *intf = netdev_priv(dev); - struct ethtool_keee *p = &intf->eee; - 
int ret; - if (!dev->phydev) return -ENODEV; - if (!p->eee_enabled) { - bcmasp_eee_enable_set(intf, false); - } else { - ret = phy_init_eee(dev->phydev, 0); - if (ret) { - netif_err(intf, hw, dev, - "EEE initialization failed: %d\n", ret); - return ret; - } - - umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER); - intf->eee.tx_lpi_enabled = e->tx_lpi_enabled; - bcmasp_eee_enable_set(intf, true); - } - return phy_ethtool_set_eee(dev->phydev, e); } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index cfd50efbdbc0..45ec1a9214a2 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -619,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev) struct phy_device *phydev = dev->phydev; u32 cmd_bits = 0, reg; int changed = 0; - bool active; if (intf->old_link != phydev->link) { changed = 1; @@ -677,8 +676,13 @@ static void bcmasp_adj_link(struct net_device *dev) } umac_wl(intf, reg, UMC_CMD); - active = phy_init_eee(phydev, 0) >= 0; - bcmasp_eee_enable_set(intf, active); + umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER); + reg = umac_rl(intf, UMC_EEE_CTRL); + if (phydev->enable_tx_lpi) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + umac_wl(intf, reg, UMC_EEE_CTRL); } reg = rgmii_rl(intf, RGMII_OOB_CNTRL); @@ -1055,6 +1059,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) /* Indicate that the MAC is responsible for PHY PM */ phydev->mac_managed_pm = true; + + /* Set phylib's copy of the LPI timer */ + phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); } umac_reset(intf); @@ -1331,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf) ASP_WAKEUP_INTR2_MASK_CLEAR); } - if (intf->eee.eee_enabled && intf->parent->eee_fixup) + if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled && + intf->parent->eee_fixup) intf->parent->eee_fixup(intf, true); netif_dbg(intf, wol, ndev, "entered WOL mode\n"); @@ -1373,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf) { u32 reg; - if (intf->eee.eee_enabled && intf->parent->eee_fixup) + if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled && + intf->parent->eee_fixup) intf->parent->eee_fixup(intf, false); reg = umac_rl(intf, UMC_MPD_CTRL); @@ -1404,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf) bcmasp_resume_from_wol(intf); - if (intf->eee.eee_enabled) - bcmasp_eee_enable_set(intf, true); - netif_device_attach(dev); return 0; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 42672c63f108..bc4e1f3b3752 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev) unsigned int i; int ret; - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + netdev_err(dev, "could not enable priv clock\n"); + return ret; + } /* Reset UniMAC */ umac_reset(priv); @@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev) goto err_deregister_notifier; } - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(&pdev->dev, "could not enable priv clock\n"); + goto err_deregister_netdev; + } priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, @@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) return 0; 
+err_deregister_netdev: + unregister_netdev(dev); err_deregister_notifier: unregister_netdevice_notifier(&priv->netdev_notifier); err_deregister_fixed_link: @@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) if (!netif_running(dev)) return 0; - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + netdev_err(dev, "could not enable priv clock\n"); + return ret; + } + if (priv->wolopts) clk_disable_unprepare(priv->wol_clk); diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index ecce23cecbea..4e266ce41180 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac) static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + struct device_node *phy_node; struct bgmac *bgmac; struct resource *regs; int ret; @@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset; bgmac->get_bus_clock = platform_bgmac_get_bus_clock; bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32; - if (of_parse_phandle(np, "phy-handle", 0)) { + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (phy_node) { + of_node_put(phy_node); bgmac->phy_connect = platform_phy_connect; } else { bgmac->phy_connect = bgmac_phy_connect_direct; diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index d73ef262991d..6fee9a41839c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -328,8 +328,7 @@ #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ #define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \ BGMAC_RX_FRAME_OFFSET) -/* Jumbo frame size with FCS */ -#define BGMAC_RX_MAX_FRAME_SIZE 9724 +#define BGMAC_RX_MAX_FRAME_SIZE 1536 #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) #define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b86f980fa7ea..7b8b5b39c7bb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -81,7 +81,6 @@ MODULE_DESCRIPTION("Broadcom NetXtreme network driver"); #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) #define BNXT_RX_DMA_OFFSET NET_SKB_PAD -#define BNXT_RX_COPY_THRESH 256 #define BNXT_TX_PUSH_THRESH 164 @@ -1343,13 +1342,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, if (!skb) return NULL; - dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, + dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, bp->rx_dir); memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, len + NET_IP_ALIGN); - dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, + dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, bp->rx_dir); skb_put(skb, len); @@ -1842,7 +1841,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } - if (len <= bp->rx_copy_thresh) { + if (len <= bp->rx_copybreak) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); @@ -2176,7 +2175,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } 
} - if (len <= bp->rx_copy_thresh) { + if (len <= bp->rx_copybreak) { if (!xdp_active) skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); else @@ -2855,6 +2854,7 @@ static int bnxt_async_event_process(struct bnxt *bp, } __bnxt_queue_sp_work(bp); async_event_process_exit: + bnxt_ulp_async_events(bp, cmpl); return 0; } @@ -2897,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) return 0; } +static bool bnxt_vnic_is_active(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + + return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0; +} + static irqreturn_t bnxt_msix(int irq, void *dev_instance) { struct bnxt_napi *bnapi = dev_instance; @@ -3164,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; } } - if (bp->flags & BNXT_FLAG_DIM) { + if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { struct dim_sample dim_sample = {}; dim_update_sample(cpr->event_ctr, @@ -3295,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) poll_done: cpr_rx = &cpr->cp_ring_arr[0]; if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && - (bp->flags & BNXT_FLAG_DIM)) { + (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { struct dim_sample dim_sample = {}; dim_update_sample(cpr->event_ctr, @@ -4601,6 +4608,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp) bp->flags |= BNXT_FLAG_GRO; } +static void bnxt_init_ring_params(struct bnxt *bp) +{ + unsigned int rx_size; + + bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; + /* Try to fit 4 chunks into a 4k page */ + rx_size = SZ_1K - + NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size); +} + /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must * be set on entry. */ @@ -4615,12 +4633,11 @@ void bnxt_set_ring_params(struct bnxt *bp) rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; ring_size = bp->rx_ring_size; bp->rx_agg_ring_size = 0; bp->rx_agg_nr_pages = 0; - if (bp->flags & BNXT_FLAG_TPA) + if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; @@ -4660,7 +4677,10 @@ void bnxt_set_ring_params(struct bnxt *bp) ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } else { - rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); + rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK, + bp->rx_copybreak, + bp->dev->cfg_pending->hds_thresh); + rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN); rx_space = rx_size + NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } @@ -4701,7 +4721,7 @@ void bnxt_set_ring_params(struct bnxt *bp) /* Changing allocation mode of RX rings. * TODO: Update when extending xdp_rxq_info to support allocation modes. 
*/ -int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) +static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { struct net_device *dev = bp->dev; @@ -4722,15 +4742,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) bp->rx_skb_func = bnxt_rx_page_skb; } bp->rx_dir = DMA_BIDIRECTIONAL; - /* Disable LRO or GRO_HW */ - netdev_update_features(dev); } else { dev->max_mtu = bp->max_mtu; bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; } - return 0; +} + +void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) +{ + __bnxt_set_rx_skb_mode(bp, page_mode); + + if (!page_mode) { + int rx, tx; + + bnxt_get_max_rings(bp, &rx, &tx, true); + if (rx > 1) { + bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features |= NETIF_F_LRO; + } + } + + /* Update LRO and GRO_HW availability */ + netdev_update_features(bp->dev); } static void bnxt_free_vnic_attributes(struct bnxt *bp) @@ -6557,6 +6592,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) { + u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh; struct hwrm_vnic_plcmodes_cfg_input *req; int rc; @@ -6566,16 +6602,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); + req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); - if (BNXT_RX_PAGE_MODE(bp)) { - req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); - } else { + if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) { req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); req->enables |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); - req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(hds_thresh); } req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); return hwrm_req_send(bp, req); @@ -7266,6 +7300,26 @@ err_out: return rc; } +static void bnxt_cancel_dim(struct bnxt *bp) +{ + int i; + + /* DIM work is initialized in bnxt_enable_napi(). Proceed only + * if NAPI is enabled. 
+ */ + if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) + return; + + /* Make sure NAPI sees that the VNIC is disabled */ + synchronize_net(); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_napi *bnapi = rxr->bnapi; + + cancel_work_sync(&bnapi->cp_ring.dim.work); + } +} + static int hwrm_ring_free_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, int cmpl_ring_id) @@ -7366,6 +7420,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } + bnxt_cancel_dim(bp); for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); @@ -8279,16 +8334,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (rc) goto func_qcfg_exit; + flags = le16_to_cpu(resp->flags); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) { struct bnxt_vf_info *vf = &bp->vf; vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF) + vf->flags |= BNXT_VF_TRUST; + else + vf->flags &= ~BNXT_VF_TRUST; } else { bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); } #endif - flags = le16_to_cpu(resp->flags); if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; @@ -9117,10 +9176,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ena = 0; if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { pg_lvl = 2; - extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); - /* allocate extra qps if fw supports RoCE fast qp destroy feature */ - extra_qps += fast_qpmd_qps; - extra_srqs = min_t(u32, 8192, max_srqs - srqs); + if (BNXT_SW_RES_LMT(bp)) { + extra_qps = max_qps - l2_qps - qp1_qps; + extra_srqs = max_srqs - srqs; + } else { + extra_qps = min_t(u32, 65536, + max_qps - l2_qps - qp1_qps); + /* allocate extra qps if fw supports RoCE fast qp + * destroy feature + */ + extra_qps += fast_qpmd_qps; + extra_srqs = min_t(u32, 8192, max_srqs - srqs); + } if (fast_qpmd_qps) ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; } @@ -9156,14 +9223,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) goto skip_rdma; ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; - /* 128K extra is needed to accommodate static AH context - * allocation by f/w. - */ - num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); - num_ah = min_t(u32, num_mr, 1024 * 128); - ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; - if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) - ctxm->mrav_av_entries = num_ah; + if (BNXT_SW_RES_LMT(bp) && + ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) { + num_ah = ctxm->mrav_av_entries; + num_mr = ctxm->max_entries - num_ah; + } else { + /* 128K extra is needed to accommodate static AH context + * allocation by f/w. 
+ */ + num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); + num_ah = min_t(u32, num_mr, 1024 * 128); + ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; + if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) + ctxm->mrav_av_entries = num_ah; + } rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); if (rc) @@ -9470,6 +9543,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_UDP_GSO_CAP; if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; + if (flags_ext2 & + FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; if (BNXT_PF(bp) && (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; @@ -11309,8 +11385,6 @@ static void bnxt_disable_napi(struct bnxt *bp) if (bnapi->in_reset) cpr->sw_stats->rx.rx_resets++; napi_disable(&bnapi->napi); - if (bnapi->rx_ring) - cancel_work_sync(&cpr->dim.work); } } @@ -11530,6 +11604,26 @@ hwrm_phy_qcaps_exit: return rc; } +static void bnxt_hwrm_mac_qcaps(struct bnxt *bp) +{ + struct hwrm_port_mac_qcaps_output *resp; + struct hwrm_port_mac_qcaps_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10a03) + return; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); + if (rc) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) + bp->mac_flags = resp->flags; + hwrm_req_drop(bp, req); +} + static bool bnxt_support_dropped(u16 advertising, u16 supported) { u16 diff = advertising ^ supported; @@ -15572,8 +15666,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID); } - + /* Make sure NAPI sees that the VNIC is disabled */ + synchronize_net(); rxr = &bp->rx_ring[idx]; + cancel_work_sync(&rxr->bnapi->cp_ring.dim.work); bnxt_hwrm_rx_ring_free(bp, rxr, false); bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); rxr->rx_next_cons = 0; @@ -15658,6 +15754,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) bp->dev->priv_flags |= IFF_SUPP_NOFCS; else bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; + + bp->mac_flags = 0; + bnxt_hwrm_mac_qcaps(bp); + if (!fw_dflt) return 0; @@ -16186,8 +16286,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (bp->max_fltr < BNXT_MAX_FLTR) bp->max_fltr = BNXT_MAX_FLTR; bnxt_init_l2_fltr_tbl(bp); - bnxt_set_rx_skb_mode(bp, false); + __bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); + bnxt_init_ring_params(bp); bnxt_set_ring_params(bp); bnxt_rdma_aux_device_init(bp); rc = bnxt_set_dflt_rings(bp, true); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7df7a2233307..2373f423a523 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -34,6 +34,9 @@ #include <linux/firmware/broadcom/tee_bnxt_fw.h> #endif +#define BNXT_DEFAULT_RX_COPYBREAK 256 +#define BNXT_MAX_RX_COPYBREAK 1024 + extern struct list_head bnxt_block_cb_list; struct page_pool; @@ -2241,8 +2244,6 @@ struct bnxt { #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO) #define BNXT_FLAG_JUMBO 0x10 #define BNXT_FLAG_STRIP_VLAN 0x20 - #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ - BNXT_FLAG_LRO) #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 @@ -2263,6 +2264,9 @@ struct bnxt { #define 
BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_TX_COAL_CMPL 0x8000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 + #define BNXT_FLAG_HDS 0x20000000 + #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ + BNXT_FLAG_LRO | BNXT_FLAG_HDS) #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -2270,6 +2274,11 @@ struct bnxt { #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) +#ifdef CONFIG_BNXT_SRIOV +#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST) +#else +#define BNXT_VF_IS_TRUSTED(bp) 0 +#endif #define BNXT_NPAR(bp) ((bp)->port_partition_type) #define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) @@ -2342,7 +2351,7 @@ struct bnxt { enum dma_data_direction rx_dir; u32 rx_ring_size; u32 rx_agg_ring_size; - u32 rx_copy_thresh; + u32 rx_copybreak; u32 rx_ring_mask; u32 rx_agg_ring_mask; int rx_nr_pages; @@ -2482,6 +2491,7 @@ struct bnxt { #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38) #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40) + #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41) u32 fw_dbg_cap; @@ -2501,6 +2511,8 @@ struct bnxt { ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV) #define BNXT_ROCE_VF_RESC_CAP(bp) \ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED) +#define BNXT_SW_RES_LMT(bp) \ + ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS) u32 hwrm_spec_code; u16 hwrm_cmd_seq; @@ -2660,6 +2672,11 @@ struct bnxt { #define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8) #define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8) + /* copied from flags in hwrm_port_mac_qcaps_output */ + u8 mac_flags; +#define BNXT_MAC_FL_NO_MAC_LPBK \ + PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED + u8 num_tests; struct bnxt_test_info *test_info; @@ -2762,6 +2779,8 @@ struct bnxt { #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +#define BNXT_HDS_THRESHOLD_MAX 1023 + static inline u32 bnxt_tx_avail(struct bnxt *bp, const struct bnxt_tx_ring_info *txr) { @@ -2846,7 +2865,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); -int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); +void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index d87681d71106..9c5820839514 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -24,6 +24,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/net_tstamp.h> #include <linux/timecounter.h> +#include <net/netdev_queues.h> #include <net/netlink.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -833,6 +834,8 @@ static void bnxt_get_ringparam(struct net_device *dev, ering->rx_pending = bp->rx_ring_size; ering->rx_jumbo_pending = bp->rx_agg_ring_size; ering->tx_pending = bp->tx_ring_size; + + kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX; } static int bnxt_set_ringparam(struct net_device *dev, 
@@ -840,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { + u8 tcp_data_split = kernel_ering->tcp_data_split; struct bnxt *bp = netdev_priv(dev); + u8 hds_config_mod; if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) return -EINVAL; + hds_config_mod = tcp_data_split != dev->cfg->hds_config; + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod) + return -EINVAL; + + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + hds_config_mod && BNXT_RX_PAGE_MODE(bp)) { + NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached"); + return -EINVAL; + } + if (netif_running(dev)) bnxt_close_nic(bp, false, false); + if (hds_config_mod) { + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED) + bp->flags |= BNXT_FLAG_HDS; + else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) + bp->flags &= ~BNXT_FLAG_HDS; + } + bp->rx_ring_size = ering->rx_pending; bp->tx_ring_size = ering->tx_pending; bnxt_set_ring_params(bp); @@ -2050,7 +2072,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, int rc; regs->version = 0; - bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); + if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED)) + bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) return; @@ -4161,7 +4184,7 @@ err: static void bnxt_get_pkgver(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - char buf[FW_VER_STR_LEN]; + char buf[FW_VER_STR_LEN - 5]; int len; if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { @@ -4327,6 +4350,45 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) return 0; } +static int bnxt_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct bnxt *bp = netdev_priv(dev); + u32 rx_copybreak; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + rx_copybreak = *(u32 *)data; + if (rx_copybreak > BNXT_MAX_RX_COPYBREAK) + return -ERANGE; + if (rx_copybreak != bp->rx_copybreak) { + if (netif_running(dev)) + return -EBUSY; + bp->rx_copybreak = rx_copybreak; + } + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct bnxt *bp = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = bp->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, u16 page_number, u8 bank, u16 start_addr, u16 data_length, @@ -4375,6 +4437,9 @@ static int bnxt_get_module_info(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int rc; + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return -EPERM; + /* No point in going further if phy status indicates * module is not inserted or if it is powered down or * if it is of type 10GBase-T @@ -4426,6 +4491,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev, u16 start = eeprom->offset, length = eeprom->len; int rc = 0; + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return -EPERM; + memset(data, 0, eeprom->len); /* Read A0 portion of the EEPROM */ @@ -4480,6 +4548,12 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int rc; + if (BNXT_VF(bp) && 
!BNXT_VF_IS_TRUSTED(bp)) { + NL_SET_ERR_MSG_MOD(extack, + "Module read not permitted on untrusted VF"); + return -EPERM; + } + rc = bnxt_get_module_status(bp, extack); if (rc) return rc; @@ -4777,7 +4851,8 @@ static int bnxt_run_loopback(struct bnxt *bp) cpr = &rxr->bnapi->cp_ring; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) cpr = rxr->rx_cpr; - pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); + pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK, + bp->rx_copybreak)); skb = netdev_alloc_skb(bp->dev, pkt_size); if (!skb) return -ENOMEM; @@ -4887,35 +4962,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); - buf[BNXT_MACLPBK_TEST_IDX] = 1; - bnxt_hwrm_mac_loopback(bp, true); - msleep(250); rc = bnxt_half_open_nic(bp); if (rc) { - bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; return; } + buf[BNXT_MACLPBK_TEST_IDX] = 1; + if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK) + goto skip_mac_loopback; + + bnxt_hwrm_mac_loopback(bp, true); + msleep(250); if (bnxt_run_loopback(bp)) etest->flags |= ETH_TEST_FL_FAILED; else buf[BNXT_MACLPBK_TEST_IDX] = 0; bnxt_hwrm_mac_loopback(bp, false); +skip_mac_loopback: + buf[BNXT_PHYLPBK_TEST_IDX] = 1; + if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK) + goto skip_phy_loopback; + bnxt_hwrm_phy_loopback(bp, true, false); msleep(1000); - if (bnxt_run_loopback(bp)) { - buf[BNXT_PHYLPBK_TEST_IDX] = 1; + if (bnxt_run_loopback(bp)) etest->flags |= ETH_TEST_FL_FAILED; - } + else + buf[BNXT_PHYLPBK_TEST_IDX] = 0; +skip_phy_loopback: + buf[BNXT_EXTLPBK_TEST_IDX] = 1; if (do_ext_lpbk) { etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; bnxt_hwrm_phy_loopback(bp, true, true); msleep(1000); - if (bnxt_run_loopback(bp)) { - buf[BNXT_EXTLPBK_TEST_IDX] = 1; + if (bnxt_run_loopback(bp)) etest->flags |= ETH_TEST_FL_FAILED; - } + else + buf[BNXT_EXTLPBK_TEST_IDX] = 0; } bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); @@ -5309,6 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_STATS_BLOCK_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_CQE, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | + ETHTOOL_RING_USE_HDS_THRS, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_fec_stats = bnxt_get_fec_stats, @@ -5350,6 +5436,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_link_ext_stats = bnxt_get_link_ext_stats, .get_eee = bnxt_get_eee, .set_eee = bnxt_set_eee, + .get_tunable = bnxt_get_tunable, + .set_tunable = bnxt_set_tunable, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index b771c84cdd89..e4a7f37036ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev, rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); if (rc) - return rc; + goto drop_req; hwrm_req_timeout(bp, req, fw_msg->timeout); resp = hwrm_req_hold(bp, req); @@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev, memcpy(fw_msg->resp, resp, resp_len); } +drop_req: hwrm_req_drop(bp, req); return rc; } @@ -297,6 +298,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) { struct bnxt_en_dev *edev = bp->edev; struct 
bnxt_ulp_ops *ops; + bool reset = false; if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) return; @@ -310,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) ops = rtnl_dereference(ulp->ulp_ops); if (!ops || !ops->ulp_irq_stop) return; - ops->ulp_irq_stop(ulp->handle); + if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) + reset = true; + ops->ulp_irq_stop(ulp->handle, reset); } } @@ -345,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) } } -int bnxt_register_async_events(struct bnxt_en_dev *edev, - unsigned long *events_bmap, - u16 max_id) +void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) +{ + u16 event_id = le16_to_cpu(cmpl->event_id); + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + struct bnxt_ulp *ulp; + + if (!bnxt_ulp_registered(edev)) + return; + ulp = edev->ulp_tbl; + + rcu_read_lock(); + + ops = rcu_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_async_notifier) + goto exit_unlock_rcu; + if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id) + goto exit_unlock_rcu; + + /* Read max_async_event_id first before testing the bitmap. */ + smp_rmb(); + + if (test_bit(event_id, ulp->async_events_bmap)) + ops->ulp_async_notifier(ulp->handle, cmpl); +exit_unlock_rcu: + rcu_read_unlock(); +} + +void bnxt_register_async_events(struct bnxt_en_dev *edev, + unsigned long *events_bmap, u16 max_id) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); @@ -359,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev, smp_wmb(); ulp->max_async_event_id = max_id; bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true); - return 0; } EXPORT_SYMBOL(bnxt_register_async_events); @@ -416,6 +446,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) edev->flags |= BNXT_EN_FLAG_VF; if (BNXT_ROCE_VF_RESC_CAP(bp)) edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT; + if (BNXT_SW_RES_LMT(bp)) + edev->flags |= BNXT_EN_FLAG_SW_RES_LMT; edev->chip_num = bp->chip_num; edev->hw_ring_stats_size = bp->hw_ring_stats_size; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 5d6aac60f236..7fa3b8d1ebd2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -30,7 +30,9 @@ struct bnxt_msix_entry { }; struct bnxt_ulp_ops { - void (*ulp_irq_stop)(void *); + /* async_notifier() cannot sleep (in BH context) */ + void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *); + void (*ulp_irq_stop)(void *, bool); void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *); }; @@ -65,6 +67,8 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_VF 0x10 #define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF) #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20 + #define BNXT_EN_FLAG_SW_RES_LMT 0x40 +#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT) struct bnxt_ulp *ulp_tbl; int l2_db_size; /* Doorbell BAR size in @@ -124,6 +128,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops, void *handle); void bnxt_unregister_dev(struct bnxt_en_dev *edev); int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg); -int bnxt_register_async_events(struct bnxt_en_dev *edev, - unsigned long *events_bmap, u16 max_id); +void bnxt_register_async_events(struct bnxt_en_dev *edev, + unsigned long *events_bmap, u16 max_id); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 
index f88b641533fc..e6c64e4bd66c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -395,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); return -EOPNOTSUPP; } + if (prog && bp->flags & BNXT_FLAG_HDS) { + netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n"); + return -EOPNOTSUPP; + } if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); return -EOPNOTSUPP; @@ -422,15 +426,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) bnxt_set_rx_skb_mode(bp, true); xdp_features_set_redirect_target(dev, true); } else { - int rx, tx; - xdp_features_clear_redirect_target(dev); bnxt_set_rx_skb_mode(bp, false); - bnxt_get_max_rings(bp, &rx, &tx, true); - if (rx > 1) { - bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; - bp->dev->hw_features |= NETIF_F_LRO; - } } bp->tx_nr_rings_xdp = tx_xdp; bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 0715ea5bf13e..3b082114f2e5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; + u32 phy_wolopts = 0; - if (dev->phydev) + if (dev->phydev) { phy_ethtool_get_wol(dev->phydev, wol); + phy_wolopts = wol->wolopts; + } /* MAC is not wake-up capable, return what the PHY does */ if (!device_can_wakeup(kdev)) @@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) /* Overlay MAC capabilities with that of the PHY queried before */ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; - wol->wolopts = priv->wolopts; - memset(wol->sopass, 0, sizeof(wol->sopass)); + wol->wolopts |= priv->wolopts; + /* Return the PHY configured magic password */ + if (phy_wolopts & WAKE_MAGICSECURE) + return; + + /* Otherwise the MAC one */ + memset(wol->sopass, 0, sizeof(wol->sopass)); if (wol->wolopts & WAKE_MAGICSECURE) memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } @@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) /* Try Wake-on-LAN from the PHY first */ if (dev->phydev) { ret = phy_ethtool_set_wol(dev->phydev, wol); - if (ret != -EOPNOTSUPP) + if (ret != -EOPNOTSUPP && wol->wolopts) return ret; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9cc8db10a8d6..d9d675f1ebfe 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -55,6 +55,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/crc32poly.h> +#include <linux/dmi.h> #include <net/checksum.h> #include <net/gso.h> @@ -7424,7 +7425,7 @@ static void tg3_napi_enable(struct tg3 *tp) for (i = 0; i < tp->irq_cnt; i++) { tnapi = &tp->napi[i]; - napi_enable(&tnapi->napi); + napi_enable_locked(&tnapi->napi); if (tnapi->tx_buffers) { netif_queue_set_napi(tp->dev, txq_idx, NETDEV_QUEUE_TYPE_TX, @@ -7445,9 +7446,10 @@ static void tg3_napi_init(struct tg3 *tp) int i; for (i = 0; i < tp->irq_cnt; i++) { - netif_napi_add(tp->dev, &tp->napi[i].napi, - i ? 
tg3_poll_msix : tg3_poll); - netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec); + netif_napi_add_locked(tp->dev, &tp->napi[i].napi, + i ? tg3_poll_msix : tg3_poll); + netif_napi_set_irq_locked(&tp->napi[i].napi, + tp->napi[i].irq_vec); } } @@ -11259,6 +11261,8 @@ static void tg3_timer_stop(struct tg3 *tp) static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) __releases(tp->lock) __acquires(tp->lock) + __releases(tp->dev->lock) + __acquires(tp->dev->lock) { int err; @@ -11271,7 +11275,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) tg3_timer_stop(tp); tp->irq_sync = 0; tg3_napi_enable(tp); + netdev_unlock(tp->dev); dev_close(tp->dev); + netdev_lock(tp->dev); tg3_full_lock(tp, 0); } return err; @@ -11299,6 +11305,7 @@ static void tg3_reset_task(struct work_struct *work) tg3_netif_stop(tp); + netdev_lock(tp->dev); tg3_full_lock(tp, 1); if (tg3_flag(tp, TX_RECOVERY_PENDING)) { @@ -11318,12 +11325,14 @@ static void tg3_reset_task(struct work_struct *work) * call cancel_work_sync() and wait forever. */ tg3_flag_clear(tp, RESET_TASK_PENDING); + netdev_unlock(tp->dev); dev_close(tp->dev); goto out; } tg3_netif_start(tp); tg3_full_unlock(tp); + netdev_unlock(tp->dev); tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); out: @@ -11683,9 +11692,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, if (err) goto out_ints_fini; + netdev_lock(dev); tg3_napi_init(tp); tg3_napi_enable(tp); + netdev_unlock(dev); for (i = 0; i < tp->irq_cnt; i++) { err = tg3_request_irq(tp, i); @@ -12569,6 +12580,7 @@ static int tg3_set_ringparam(struct net_device *dev, irq_sync = 1; } + netdev_lock(dev); tg3_full_lock(tp, irq_sync); tp->rx_pending = ering->rx_pending; @@ -12597,6 +12609,7 @@ static int tg3_set_ringparam(struct net_device *dev, } tg3_full_unlock(tp); + netdev_unlock(dev); if (irq_sync && !err) tg3_phy_start(tp); @@ -12678,6 +12691,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam irq_sync = 1; } + netdev_lock(dev); tg3_full_lock(tp, irq_sync); if (epause->autoneg) @@ -12707,6 +12721,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam } tg3_full_unlock(tp); + netdev_unlock(dev); } tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; @@ -13911,6 +13926,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, data[TG3_INTERRUPT_TEST] = 1; } + netdev_lock(dev); tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -13922,6 +13938,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, } tg3_full_unlock(tp); + netdev_unlock(dev); if (irq_sync && !err2) tg3_phy_start(tp); @@ -14365,6 +14382,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_set_mtu(dev, tp, new_mtu); + netdev_lock(dev); tg3_full_lock(tp, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -14384,6 +14402,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_netif_start(tp); tg3_full_unlock(tp); + netdev_unlock(dev); if (!err) tg3_phy_start(tp); @@ -18164,6 +18183,7 @@ static int tg3_resume(struct device *device) netif_device_attach(dev); + netdev_lock(dev); tg3_full_lock(tp, 0); tg3_ape_driver_state_change(tp, RESET_KIND_INIT); @@ -18180,6 +18200,7 @@ static int tg3_resume(struct device *device) out: tg3_full_unlock(tp); + netdev_unlock(dev); if (!err) tg3_phy_start(tp); @@ -18192,6 +18213,50 @@ unlock: static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal + * 
PCIe AER event on the tg3 device if the tg3 device is not, or cannot + * be, powered down. + */ +static const struct dmi_system_id tg3_restart_aer_quirk_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"), + }, + }, + {} +}; + static void tg3_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -18208,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev) if (system_state == SYSTEM_POWER_OFF) tg3_power_down(tp); + else if (system_state == SYSTEM_RESTART && + dmi_first_match(tg3_restart_aer_quirk_table) && + pdev->current_state != PCI_D3cold && + pdev->current_state != PCI_UNKNOWN) { + /* Disable PCIe AER on the tg3 to avoid a fatal + * error during this system restart. + */ + pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_CERE | + PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_URRE); + } rtnl_unlock(); @@ -18260,7 +18338,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, done: if (state == pci_channel_io_perm_failure) { if (netdev) { + netdev_lock(netdev); tg3_napi_enable(tp); + netdev_unlock(netdev); dev_close(netdev); } err = PCI_ERS_RESULT_DISCONNECT; @@ -18314,7 +18394,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) done: if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { + netdev_lock(netdev); tg3_napi_enable(tp); + netdev_unlock(netdev); dev_close(netdev); } rtnl_unlock(); @@ -18340,12 +18422,14 @@ static void tg3_io_resume(struct pci_dev *pdev) if (!netdev || !netif_running(netdev)) goto done; + netdev_lock(netdev); tg3_full_lock(tp, 0); tg3_ape_driver_state_change(tp, RESET_KIND_INIT); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, true); if (err) { tg3_full_unlock(tp); + netdev_unlock(netdev); netdev_err(netdev, "Cannot restart hardware after reset.\n"); goto done; } @@ -18357,6 +18441,7 @@ static void tg3_io_resume(struct pci_dev *pdev) tg3_netif_start(tp); tg3_full_unlock(tp); + netdev_unlock(netdev); tg3_phy_start(tp); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index daa416fb1724..48496209fb16 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -530,19 +530,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed) if (bp->phy_interface == PHY_INTERFACE_MODE_MII) return; - switch (speed) { - case SPEED_10: - rate = 2500000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_1000: - rate = 125000000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) return; - } rate_rounded = clk_round_rate(bp->tx_clk, rate); if (rate_rounded < 0) @@ -578,6 +568,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, } static void macb_usx_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct 
phylink_link_state *state) { struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); @@ -608,7 +599,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs, return 0; } -static void macb_pcs_get_state(struct phylink_pcs *pcs, +static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { state->link = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc3af0054406..2f0b3e389e62 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, struct adapter *adap = container_of(t, struct adapter, tids); struct sk_buff *skb; - WARN_ON(tid_out_of_range(&adap->tids, tid)); + if (tid_out_of_range(&adap->tids, tid)) { + dev_err(adap->pdev_dev, "tid %d out of range\n", tid); + return; + } if (t->tid_tab[tid - adap->tids.tid_base]) { t->tid_tab[tid - adap->tids.tid_base] = NULL; @@ -6559,6 +6562,9 @@ static void cxgb4_advance_esn_state(struct xfrm_state *x) { struct adapter *adap = netdev2adap(x->xso.dev); + if (x->xso.dir != XFRM_DEV_OFFLOAD_IN) + return; + if (!mutex_trylock(&uld_mutex)) { dev_dbg(adap->pdev_dev, "crypto uld critical resource is under use\n"); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c index 96fd31d75dfd..daa1ebaef511 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c @@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, * driver. Once driver synthesizes cpl_pass_accept_req the skb will go * through the regular cpl_pass_accept_req processing in TOM. */ - skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) - - pktshift, GFP_ATOMIC); + skb = alloc_skb(size_add(gl->tot_len, + sizeof(struct cpl_pass_accept_req)) - + pktshift, GFP_ATOMIC); if (unlikely(!skb)) return NULL; __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9913952ccb42..49f6cab01ed5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -109,7 +109,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { {0, 0}, /* 0 - 4 Gbps */ {0, 3}, /* 4 - 10 Gbps */ - {3, 6}, /* 10 - 40 Gbps */ + {3, 6}, /* 10+ Gbps */ }; static void enic_init_affinity_hint(struct enic *enic) @@ -428,6 +428,36 @@ static void enic_mtu_check(struct enic *enic) } } +static void enic_set_rx_coal_setting(struct enic *enic) +{ + unsigned int speed; + int index = -1; + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + + /* 1. Read the link speed from fw + * 2. Pick the default range for the speed + * 3. 
Update it in enic->rx_coalesce_setting + */ + speed = vnic_dev_port_speed(enic->vdev); + if (speed > ENIC_LINK_SPEED_10G) + index = ENIC_LINK_40G_INDEX; + else if (speed > ENIC_LINK_SPEED_4G) + index = ENIC_LINK_10G_INDEX; + else + index = ENIC_LINK_4G_INDEX; + + rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; + rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; + rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; + + /* Start with the value provided by UCSM */ + for (index = 0; index < enic->rq_count; index++) + enic->cq[index].cur_rx_coal_timeval = + enic->config.intr_timer_usec; + + rx_coal->use_adaptive_rx_coalesce = 1; +} + static void enic_link_check(struct enic *enic) { int link_status = vnic_dev_link_status(enic->vdev); @@ -436,6 +466,7 @@ static void enic_link_check(struct enic *enic) if (link_status && !carrier_ok) { netdev_info(enic->netdev, "Link UP\n"); netif_carrier_on(enic->netdev); + enic_set_rx_coal_setting(enic); } else if (!link_status && carrier_ok) { netdev_info(enic->netdev, "Link DOWN\n"); netif_carrier_off(enic->netdev); @@ -1901,36 +1932,6 @@ static void enic_synchronize_irqs(struct enic *enic) } } -static void enic_set_rx_coal_setting(struct enic *enic) -{ - unsigned int speed; - int index = -1; - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - - /* 1. Read the link speed from fw - * 2. Pick the default range for the speed - * 3. Update it in enic->rx_coalesce_setting - */ - speed = vnic_dev_port_speed(enic->vdev); - if (ENIC_LINK_SPEED_10G < speed) - index = ENIC_LINK_40G_INDEX; - else if (ENIC_LINK_SPEED_4G < speed) - index = ENIC_LINK_10G_INDEX; - else - index = ENIC_LINK_4G_INDEX; - - rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; - rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; - rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; - - /* Start with the value provided by UCSM */ - for (index = 0; index < enic->rq_count; index++) - enic->cq[index].cur_rx_coal_timeval = - enic->config.intr_timer_usec; - - rx_coal->use_adaptive_rx_coalesce = 1; -} - static int enic_dev_notify_set(struct enic *enic) { int err; @@ -3063,7 +3064,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&enic->notify_timer, enic_notify_timer, 0); enic_rfs_flw_tbl_init(enic); - enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8735e333034c..b87eaf0c250c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); dm9000_release_board(pdev, dm); - free_netdev(ndev); /* free device structure */ if (dm->power_supply) regulator_disable(dm->power_supply); + free_netdev(ndev); /* free device structure */ + dev_dbg(&pdev->dev, "released and freed device\n"); } diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 95a5295d0361..0d030cb0b21c 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -1966,23 +1966,41 @@ failed: static void tsnep_queue_enable(struct tsnep_queue *queue) { + struct tsnep_adapter *adapter = queue->adapter; + + netif_napi_set_irq(&queue->napi, queue->irq); 
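	/* netif_napi_set_irq() records the interrupt line backing this NAPI
	 * context, and the netif_queue_set_napi() calls below publish the
	 * queue-to-NAPI mapping, so both can be reported to user space via
	 * the netdev netlink interface.
	 */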
napi_enable(&queue->napi); - tsnep_enable_irq(queue->adapter, queue->irq_mask); + tsnep_enable_irq(adapter, queue->irq_mask); - if (queue->tx) + if (queue->tx) { + netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, + NETDEV_QUEUE_TYPE_TX, &queue->napi); tsnep_tx_enable(queue->tx); + } - if (queue->rx) + if (queue->rx) { + netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, + NETDEV_QUEUE_TYPE_RX, &queue->napi); tsnep_rx_enable(queue->rx); + } } static void tsnep_queue_disable(struct tsnep_queue *queue) { - if (queue->tx) + struct tsnep_adapter *adapter = queue->adapter; + + if (queue->rx) + netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, + NETDEV_QUEUE_TYPE_RX, NULL); + + if (queue->tx) { tsnep_tx_disable(queue->tx, &queue->napi); + netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, + NETDEV_QUEUE_TYPE_TX, NULL); + } napi_disable(&queue->napi); - tsnep_disable_irq(queue->adapter, queue->irq_mask); + tsnep_disable_irq(adapter, queue->irq_mask); /* disable RX after NAPI polling has been disabled, because RX can be * enabled during NAPI polling diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 75401d2a5fb4..a2d7300925a8 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -81,8 +81,7 @@ config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE && PPC32 select FSL_PQ_MDIO - select PHYLIB - select FIXED_PHY + select PHYLINK help This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index bf5baef5c3e0..4948b4906584 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, new_xdpf->len = xdpf->len; new_xdpf->headroom = priv->tx_headroom; new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; - new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; + new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0; /* Release the initial buffer */ xdp_return_frame_rx_napi(xdpf); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index a293b08f36d4..147a93bf9fa9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -780,13 +780,14 @@ struct ethsw_dump_ctx { static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, struct ethsw_dump_ctx *dump) { + struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct nlmsghdr *nlh; struct ndmsg *ndm; - if (dump->idx < dump->cb->args[2]) + if (dump->idx < ctx->fdb_idx) goto skip; nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 535969fa0fdb..6a6fc819dfde 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -146,6 +146,27 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, return 0; } +static bool enetc_tx_csum_offload_check(struct sk_buff *skb) +{ + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + case offsetof(struct udphdr, check): + return true; + default: + return 
false; + } +} + +static bool enetc_skb_is_ipv6(struct sk_buff *skb) +{ + return vlan_get_protocol(skb) == htons(ETH_P_IPV6); +} + +static bool enetc_skb_is_tcp(struct sk_buff *skb) +{ + return skb->csum_offset == offsetof(struct tcphdr, check); +} + static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; @@ -163,6 +184,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_addr_t dma; u8 flags = 0; + enetc_clear_tx_bd(&temp_bd); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* Can not support TSD and checksum offload at the same time */ + if (priv->active_offloads & ENETC_F_TXCSUM && + enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) { + temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, + skb_network_offset(skb)); + temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, + skb_network_header_len(skb) / 4); + temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T, + enetc_skb_is_ipv6(skb)); + if (enetc_skb_is_tcp(skb)) + temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, + ENETC_TXBD_L4T_TCP); + else + temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, + ENETC_TXBD_L4T_UDP); + flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS; + } else if (skb_checksum_help(skb)) { + return 0; + } + } + i = tx_ring->next_to_use; txbd = ENETC_TXBD(*tx_ring, i); prefetchw(txbd); @@ -173,7 +217,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) temp_bd.addr = cpu_to_le64(dma); temp_bd.buf_len = cpu_to_le16(len); - temp_bd.lstatus = 0; tx_swbd = &tx_ring->tx_swbd[i]; tx_swbd->dma = dma; @@ -489,8 +532,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso } } +static int enetc_lso_count_descs(const struct sk_buff *skb) +{ + /* 4 BDs: 1 BD for LSO header + 1 BD for extended BD + 1 BD + * for linear area data but not include LSO header, namely + * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's + * okay, we only need to consider the worst case). And 1 BD + * for gap. + */ + return skb_shinfo(skb)->nr_frags + 4; +} + +static int enetc_lso_get_hdr_len(const struct sk_buff *skb) +{ + int hdr_len, tlen; + + tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr); + hdr_len = skb_transport_offset(skb) + tlen; + + return hdr_len; +} + +static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso) +{ + lso->lso_seg_size = skb_shinfo(skb)->gso_size; + lso->ipv6 = enetc_skb_is_ipv6(skb); + lso->tcp = skb_is_gso_tcp(skb); + lso->l3_hdr_len = skb_network_header_len(skb); + lso->l3_start = skb_network_offset(skb); + lso->hdr_len = enetc_lso_get_hdr_len(skb); + lso->total_len = skb->len - lso->hdr_len; +} + +static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + int *i, struct enetc_lso_t *lso) +{ + union enetc_tx_bd txbd_tmp, *txbd; + struct enetc_tx_swbd *tx_swbd; + u16 frm_len, frm_len_ext; + u8 flags, e_flags = 0; + dma_addr_t addr; + char *hdr; + + /* Get the first BD of the LSO BDs chain */ + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + /* Prepare LSO header: MAC + IP + TCP/UDP */ + hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE; + memcpy(hdr, skb->data, lso->hdr_len); + addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; + + /* {frm_len_ext, frm_len} indicates the total length of + * large transmit data unit. frm_len contains the 16 least + * significant bits and frm_len_ext contains the 4 most + * significant bits. 
+ */ + frm_len = lso->total_len & 0xffff; + frm_len_ext = (lso->total_len >> 16) & 0xf; + + /* Set the flags of the first BD */ + flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO | + ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len); + + /* first BD needs frm_len and offload flags set */ + txbd_tmp.frm_len = cpu_to_le16(frm_len); + txbd_tmp.flags = flags; + + txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start); + /* l3_hdr_size in 32-bits (4 bytes) */ + txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, + lso->l3_hdr_len / 4); + if (lso->ipv6) + txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T; + else + txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS; + + txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ? + ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP); + + /* For the LSO header we do not set the dma address since + * we do not want it unmapped when we do cleanup. We still + * set len so that we count the bytes sent. + */ + tx_swbd->len = lso->hdr_len; + tx_swbd->do_twostep_tstamp = false; + tx_swbd->check_wb = false; + + /* Actually write the header in the BD */ + *txbd = txbd_tmp; + + /* Get the next BD, and the next BD is extended BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + enetc_clear_tx_bd(&txbd_tmp); + if (skb_vlan_tag_present(skb)) { + /* Setup the VLAN fields */ + txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + txbd_tmp.ext.tpid = ENETC_TPID_8021Q; + e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + /* Write the BD */ + txbd_tmp.ext.e_flags = e_flags; + txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size); + txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext); + *txbd = txbd_tmp; +} + +static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, + int *i, struct enetc_lso_t *lso, int *count) +{ + union enetc_tx_bd txbd_tmp, *txbd = NULL; + struct enetc_tx_swbd *tx_swbd; + skb_frag_t *frag; + dma_addr_t dma; + u8 flags = 0; + int len, f; + + len = skb_headlen(skb) - lso->hdr_len; + if (len > 0) { + dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len, + len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + return -ENOMEM; + + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + *count += 1; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(dma); + txbd_tmp.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + tx_swbd->dir = DMA_TO_DEVICE; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + if (txbd) + *txbd = txbd_tmp; + + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag); + if (dma_mapping_error(tx_ring->dev, dma)) + return -ENOMEM; + + /* Get the next BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + *count += 1; + + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.addr = cpu_to_le64(dma); + txbd_tmp.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + tx_swbd->dir = DMA_TO_DEVICE; + } + + /* Last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + txbd_tmp.flags = flags; + *txbd = txbd_tmp; + + tx_swbd->is_eof = 1; + tx_swbd->skb = skb; + + return 0; +} + +static int enetc_lso_hw_offload(struct 
enetc_bdr *tx_ring, struct sk_buff *skb) +{ + struct enetc_tx_swbd *tx_swbd; + struct enetc_lso_t lso = {0}; + int err, i, count = 0; + + /* Initialize the LSO handler */ + enetc_lso_start(skb, &lso); + i = tx_ring->next_to_use; + + enetc_lso_map_hdr(tx_ring, skb, &i, &lso); + /* First BD and an extend BD */ + count += 2; + + err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count); + if (err) + goto dma_err; + + /* Go to the next BD */ + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + enetc_update_tx_ring_tail(tx_ring); + + return count; + +dma_err: + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (--count); + + return 0; +} + static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); int hdr_len, total_len, data_len; struct enetc_tx_swbd *tx_swbd; union enetc_tx_bd *txbd; @@ -556,7 +824,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb bd_data_num++; tso_build_data(skb, &tso, size); - if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) + if (unlikely(bd_data_num >= priv->max_frags && data_len)) goto err_chained_bd; } @@ -594,7 +862,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_bdr *tx_ring; - int count, err; + int count; /* Queue one-step Sync packet if already locked */ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { @@ -608,16 +876,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, tx_ring = priv->tx_ring[skb->queue_mapping]; if (skb_is_gso(skb)) { - if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { - netif_stop_subqueue(ndev, tx_ring->index); - return NETDEV_TX_BUSY; - } + /* LSO data unit lengths of up to 256KB are supported */ + if (priv->active_offloads & ENETC_F_LSO && + (skb->len - enetc_lso_get_hdr_len(skb)) <= + ENETC_LSO_MAX_DATA_LEN) { + if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - enetc_lock_mdio(); - count = enetc_map_tx_tso_buffs(tx_ring, skb); - enetc_unlock_mdio(); + count = enetc_lso_hw_offload(tx_ring, skb); + } else { + if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + enetc_lock_mdio(); + count = enetc_map_tx_tso_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } } else { - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags)) if (unlikely(skb_linearize(skb))) goto drop_packet_err; @@ -627,11 +907,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (skb->ip_summed == CHECKSUM_PARTIAL) { - err = skb_checksum_help(skb); - if (err) - goto drop_packet_err; - } enetc_lock_mdio(); count = enetc_map_tx_buffs(tx_ring, skb); enetc_unlock_mdio(); @@ -640,7 +915,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, if (unlikely(!count)) goto drop_packet_err; - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags)) netif_stop_subqueue(ndev, tx_ring->index); return NETDEV_TX_OK; @@ -908,7 +1183,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && __netif_subqueue_stopped(ndev, tx_ring->index) && 
!test_bit(ENETC_TX_DOWN, &priv->flags) && - (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + (enetc_bd_unused(tx_ring) >= + ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) { netif_wake_subqueue(ndev, tx_ring->index); } @@ -1759,6 +2035,9 @@ void enetc_get_si_caps(struct enetc_si *si) rss = enetc_rd(hw, ENETC_SIRSSCAPR); si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); } + + if (val & ENETC_SIPCAPR0_LSO) + si->hw_features |= ENETC_SI_F_LSO; } EXPORT_SYMBOL_GPL(enetc_get_si_caps); @@ -2055,6 +2334,14 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) return 0; } +static void enetc_set_lso_flags_mask(struct enetc_hw *hw) +{ + enetc_wr(hw, ENETC4_SILSOSFMR0, + SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK, + ENETC4_TCP_NL_SEG_FLAGS_DMASK)); + enetc_wr(hw, ENETC4_SILSOSFMR1, 0); +} + int enetc_configure_si(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; @@ -2068,6 +2355,9 @@ int enetc_configure_si(struct enetc_ndev_priv *priv) /* enable SI */ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + if (si->hw_features & ENETC_SI_F_LSO) + enetc_set_lso_flags_mask(hw); + /* TODO: RSS support for i.MX95 will be supported later, and the * is_enetc_rev1() condition will be removed */ @@ -3269,17 +3559,21 @@ EXPORT_SYMBOL_GPL(enetc_pci_remove); static const struct enetc_drvdata enetc_pf_data = { .sysclk_freq = ENETC_CLK_400M, .pmac_offset = ENETC_PMAC_OFFSET, + .max_frags = ENETC_MAX_SKB_FRAGS, .eth_ops = &enetc_pf_ethtool_ops, }; static const struct enetc_drvdata enetc4_pf_data = { .sysclk_freq = ENETC_CLK_333M, + .tx_csum = true, + .max_frags = ENETC4_MAX_SKB_FRAGS, .pmac_offset = ENETC4_PMAC_OFFSET, .eth_ops = &enetc4_pf_ethtool_ops, }; static const struct enetc_drvdata enetc_vf_data = { .sysclk_freq = ENETC_CLK_400M, + .max_frags = ENETC_MAX_SKB_FRAGS, .eth_ops = &enetc_vf_ethtool_ops, }; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 72fa03dbc2dd..4ad4eb5c5a74 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -41,6 +41,18 @@ struct enetc_tx_swbd { u8 qbv_en:1; }; +struct enetc_lso_t { + bool ipv6; + bool tcp; + u8 l3_hdr_len; + u8 hdr_len; /* LSO header length */ + u8 l3_start; + u16 lso_seg_size; + int total_len; /* total data length, not include LSO header */ +}; + +#define ENETC_LSO_MAX_DATA_LEN SZ_256K + #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE #define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ @@ -59,9 +71,16 @@ struct enetc_rx_swbd { /* ENETC overhead: optional extension BD + 1 BD gap */ #define ENETC_TXBDS_NEEDED(val) ((val) + 2) -/* max # of chained Tx BDs is 15, including head and extension BD */ +/* For LS1028A, max # of chained Tx BDs is 15, including head and + * extension BD. + */ #define ENETC_MAX_SKB_FRAGS 13 -#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) +/* For ENETC v4 and later versions, max # of chained Tx BDs is 63, + * including head and extension BD, but the range of MAX_SKB_FRAGS + * is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS. 
+ */ +#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS +#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1) struct enetc_ring_stats { unsigned int packets; @@ -231,9 +250,12 @@ enum enetc_errata { #define ENETC_SI_F_PSFP BIT(0) #define ENETC_SI_F_QBV BIT(1) #define ENETC_SI_F_QBU BIT(2) +#define ENETC_SI_F_LSO BIT(3) struct enetc_drvdata { u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */ + u8 tx_csum:1; + u8 max_frags; u64 sysclk_freq; const struct ethtool_ops *eth_ops; }; @@ -341,6 +363,8 @@ enum enetc_active_offloads { ENETC_F_QBV = BIT(9), ENETC_F_QCI = BIT(10), ENETC_F_QBU = BIT(11), + ENETC_F_TXCSUM = BIT(12), + ENETC_F_LSO = BIT(13), }; enum enetc_flags_bit { @@ -375,6 +399,7 @@ struct enetc_ndev_priv { u16 msg_enable; u8 preemptible_tcs; + u8 max_frags; /* The maximum number of BDs for fragments */ enum enetc_active_offloads active_offloads; diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h index 26b220677448..695cb07c74bc 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h @@ -12,6 +12,29 @@ #define NXP_ENETC_VENDOR_ID 0x1131 #define NXP_ENETC_PF_DEV_ID 0xe101 +/**********************Station interface registers************************/ +/* Station interface LSO segmentation flag mask register 0/1 */ +#define ENETC4_SILSOSFMR0 0x1300 +#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16) +#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0) +#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \ + FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first)) + +#define ENETC4_SILSOSFMR1 0x1304 +#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0) +#define ENETC4_TCP_FLAGS_FIN BIT(0) +#define ENETC4_TCP_FLAGS_SYN BIT(1) +#define ENETC4_TCP_FLAGS_RST BIT(2) +#define ENETC4_TCP_FLAGS_PSH BIT(3) +#define ENETC4_TCP_FLAGS_ACK BIT(4) +#define ENETC4_TCP_FLAGS_URG BIT(5) +#define ENETC4_TCP_FLAGS_ECE BIT(6) +#define ENETC4_TCP_FLAGS_CWR BIT(7) +#define ENETC4_TCP_FLAGS_NS BIT(8) +/* According to tso_build_hdr(), clear all special flags for not last packet. 
*/ +#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \ + ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH) + /***************************ENETC port registers**************************/ #define ENETC4_ECAPR0 0x0 #define ECAPR0_RFS BIT(2) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 55ba949230ff..4098f01479bc 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -25,6 +25,7 @@ #define ENETC_SIPCAPR0 0x20 #define ENETC_SIPCAPR0_RSS BIT(8) #define ENETC_SIPCAPR0_RFS BIT(2) +#define ENETC_SIPCAPR0_LSO BIT(1) #define ENETC_SIPCAPR1 0x24 #define ENETC_SITGTGR 0x30 #define ENETC_SIRBGCR 0x38 @@ -554,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg) union enetc_tx_bd { struct { __le64 addr; - __le16 buf_len; + union { + __le16 buf_len; + __le16 hdr_len; /* For LSO, ENETC 4.1 and later */ + }; __le16 frm_len; union { struct { - u8 reserved[3]; + u8 l3_aux0; +#define ENETC_TX_BD_L3_START GENMASK(6, 0) +#define ENETC_TX_BD_IPCS BIT(7) + u8 l3_aux1; +#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0) +#define ENETC_TX_BD_L3T BIT(7) + u8 l4_aux; +#define ENETC_TX_BD_L4T GENMASK(7, 5) +#define ENETC_TXBD_L4T_UDP 1 +#define ENETC_TXBD_L4T_TCP 2 u8 flags; }; /* default layout */ __le32 txstart; @@ -569,23 +582,27 @@ union enetc_tx_bd { __le32 tstamp; __le16 tpid; __le16 vid; - u8 reserved[6]; + __le16 lso_sg_size; /* For ENETC 4.1 and later */ + __le16 frm_len_ext; /* For ENETC 4.1 and later */ + u8 reserved[2]; u8 e_flags; u8 flags; } ext; /* Tx BD extension */ struct { __le32 tstamp; - u8 reserved[10]; + u8 reserved[8]; + __le16 lso_err_count; /* For ENETC 4.1 and later */ u8 status; u8 flags; } wb; /* writeback descriptor */ }; enum enetc_txbd_flags { - ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */ + ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_TSE = BIT(1), + ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_W = BIT(2), - ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */ + ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */ ENETC_TXBD_FLAGS_TXSTART = BIT(4), ENETC_TXBD_FLAGS_EX = BIT(6), ENETC_TXBD_FLAGS_F = BIT(7) @@ -654,6 +671,8 @@ union enetc_rx_bd { #define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ #define ENETC_CBD_STATUS_MASK 0xf +#define ENETC_TPID_8021Q 0 + struct enetc_cmd_rfse { u8 smac_h[6]; u8 smac_m[6]; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c index 0eecfc833164..3fd9b0727875 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c @@ -101,6 +101,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; priv->sysclk_freq = si->drvdata->sysclk_freq; + priv->max_frags = si->drvdata->max_frags; ndev->netdev_ops = ndev_ops; enetc_set_ethtool_ops(ndev); ndev->watchdog_timeo = 5 * HZ; @@ -109,16 +110,24 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | 
NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; ndev->priv_flags |= IFF_UNICAST_FLT; + if (si->drvdata->tx_csum) + priv->active_offloads |= ENETC_F_TXCSUM; + + if (si->hw_features & ENETC_SI_F_LSO) + priv->active_offloads |= ENETC_F_LSO; + /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ if (!is_enetc_rev1(si)) { ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index a5f8ce576b6e..3768752b6008 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -136,6 +136,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; priv->sysclk_freq = si->drvdata->sysclk_freq; + priv->max_frags = si->drvdata->max_frags; ndev->netdev_ops = ndev_ops; enetc_set_ethtool_ops(ndev); ndev->watchdog_timeo = 5 * HZ; @@ -144,11 +145,13 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_L4; ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 1cca0425d493..c81f2ea588f2 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -671,8 +671,6 @@ struct fec_enet_private { unsigned int tx_time_itr; unsigned int itr_clk_rate; - /* tx lpi eee mode */ - struct ethtool_keee eee; unsigned int clk_ref_rate; /* ptp clock period in ns*/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1b55047c0237..f7c4ce8e9a26 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -840,6 +840,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; + struct bufdesc *tmp_bdp; + struct bufdesc_ex *ebdp; struct tso_t tso; unsigned int index = 0; int ret; @@ -913,7 +915,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, return 0; err_release: - /* TODO: Release all used data descriptors for TSO */ + /* Release all used data descriptors for TSO */ + tmp_bdp = txq->bd.cur; + + while (tmp_bdp != bdp) { + /* Unmap data buffers */ + if (tmp_bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(tmp_bdp->cbd_bufaddr), + fec16_to_cpu(tmp_bdp->cbd_datlen), + DMA_TO_DEVICE); + + /* Clear standard buffer descriptor fields */ + tmp_bdp->cbd_sc = 0; + tmp_bdp->cbd_datlen = 0; + tmp_bdp->cbd_bufaddr = 0; + + /* Handle extended descriptor if enabled */ + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)tmp_bdp; + 
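	/* cbd_esc holds the extended control bits (interrupt enable,
	 * timestamping, checksum insertion), so clear it along with the
	 * standard fields before the descriptor is reused.
	 */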
ebdp->cbd_esc = 0; + } + + tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); + } + + dev_kfree_skb_any(skb); + return ret; } @@ -1591,19 +1620,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget) fec_enet_tx_queue(ndev, i, budget); } -static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, +static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, struct bufdesc *bdp, int index) { struct page *new_page; dma_addr_t phys_addr; new_page = page_pool_dev_alloc_pages(rxq->page_pool); - WARN_ON(!new_page); - rxq->rx_skb_info[index].page = new_page; + if (unlikely(!new_page)) + return -ENOMEM; + rxq->rx_skb_info[index].page = new_page; rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); + + return 0; } static u32 @@ -1698,6 +1730,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) int cpu = smp_processor_id(); struct xdp_buff xdp; struct page *page; + __fec32 cbd_bufaddr; u32 sub_len = 4; #if !defined(CONFIG_M5272) @@ -1766,12 +1799,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) index = fec_enet_get_bd_index(bdp, &rxq->bd); page = rxq->rx_skb_info[index].page; + cbd_bufaddr = bdp->cbd_bufaddr; + if (fec_enet_update_cbd(rxq, bdp, index)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + dma_sync_single_for_cpu(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), + fec32_to_cpu(cbd_bufaddr), pkt_len, DMA_FROM_DEVICE); prefetch(page_address(page)); - fec_enet_update_cbd(rxq, bdp, index); if (xdp_prog) { xdp_buff_clear_frags_flag(&xdp); @@ -2045,14 +2083,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) return us * (fep->clk_ref_rate / 1000) / 1000; } -static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) +static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, + bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; unsigned int sleep_cycle, wake_cycle; if (enable) { - sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); wake_cycle = sleep_cycle; } else { sleep_cycle = 0; @@ -2105,7 +2143,9 @@ static void fec_enet_adjust_link(struct net_device *ndev) napi_enable(&fep->napi); } if (fep->quirks & FEC_QUIRK_HAS_EEE) - fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi); + fec_enet_eee_mode_set(ndev, + phy_dev->eee_cfg.tx_lpi_timer, + phy_dev->enable_tx_lpi); } else { if (fep->link) { netif_stop_queue(ndev); @@ -3181,7 +3221,6 @@ static int fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3189,8 +3228,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - edata->tx_lpi_timer = p->tx_lpi_timer; - return phy_ethtool_get_eee(ndev->phydev, edata); } @@ -3198,7 +3235,6 @@ static int fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3206,8 +3242,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) if (!netif_running(ndev)) return -ENETDOWN; - p->tx_lpi_timer = edata->tx_lpi_timer; - return 
phy_ethtool_set_eee(ndev->phydev, edata); } diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index fb416d60dcd7..11887458f050 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev) { struct fman *fman; struct device_node *fm_node, *muram_node; + void __iomem *base_addr; struct resource *res; u32 val, range[2]; int err, irq; struct clk *clk; u32 clk_rate; - phys_addr_t phys_base_addr; - resource_size_t mem_size; fman = kzalloc(sizeof(*fman), GFP_KERNEL); if (!fman) @@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_node_put; fman->dts_params.err_irq = err; - /* Get the FM address */ - res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); - if (!res) { - err = -EINVAL; - dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", - __func__); - goto fman_node_put; - } - - phys_base_addr = res->start; - mem_size = resource_size(res); - clk = of_clk_get(fm_node, 0); if (IS_ERR(clk)) { err = PTR_ERR(clk); @@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev) } } - fman->dts_params.res = - devm_request_mem_region(&of_dev->dev, phys_base_addr, - mem_size, "fman"); - if (!fman->dts_params.res) { - err = -EBUSY; - dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", - __func__); - goto fman_free; - } - - fman->dts_params.base_addr = - devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); - if (!fman->dts_params.base_addr) { - err = -ENOMEM; + base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res); + if (IS_ERR(base_addr)) { + err = PTR_ERR(base_addr); dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); goto fman_free; } + fman->dts_params.base_addr = base_addr; + fman->dts_params.res = res; + fman->dev = &of_dev->dev; err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 85617bb94959..b3e2a596ad2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs) return container_of(pcs, struct fman_mac, pcs); } -static void dtsec_pcs_get_state(struct phylink_pcs *pcs, +static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct fman_mac *dtsec = pcs_to_dtsec(pcs); - phylink_mii_c22_pcs_get_state(dtsec->tbidev, state); + phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state); } static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 6663c1768089..88510f822759 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/of_address.h> @@ -34,6 +34,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> +#include <linux/rtnetlink.h> #include <linux/uaccess.h> #include <asm/irq.h> @@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = { 
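	/* The fixed 7-byte preamble length previously programmed at runtime
	 * from the .prel field below is now baked into the
	 * UCC_GETH_MACCFG2_INIT default in ucc_geth.h.
	 */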
.transmitFlowControl = 1, .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, - .prel = 7, .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ .minFrameLength = 64, .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ @@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0, return 0; } -static int init_check_frame_length_mode(int length_check, - u32 __iomem *maccfg2_register) -{ - u32 value = 0; - - value = in_be32(maccfg2_register); - - if (length_check) - value |= MACCFG2_LC; - else - value &= ~MACCFG2_LC; - - out_be32(maccfg2_register, value); - return 0; -} - -static int init_preamble_length(u8 preamble_length, - u32 __iomem *maccfg2_register) -{ - if ((preamble_length < 3) || (preamble_length > 7)) - return -EINVAL; - - clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, - preamble_length << MACCFG2_PREL_SHIFT); - - return 0; -} - static int init_rx_parameters(int reject_broadcast, int receive_short_frames, int promiscuous, u32 __iomem *upsmr_register) @@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length, return 0; } -static int adjust_enet_interface(struct ucc_geth_private *ugeth) +static bool phy_interface_mode_is_reduced(phy_interface_t interface) { - struct ucc_geth_info *ug_info; - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - int ret_val; - u32 upsmr, maccfg2; - u16 value; - - ugeth_vdbg("%s: IN", __func__); - - ug_info = ugeth->ug_info; - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - /* Set MACCFG2 */ - maccfg2 = in_be32(&ug_regs->maccfg2); - maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; - if ((ugeth->max_speed == SPEED_10) || - (ugeth->max_speed == SPEED_100)) - maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; - else if (ugeth->max_speed == SPEED_1000) - maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; - maccfg2 |= ug_info->padAndCrc; - out_be32(&ug_regs->maccfg2, maccfg2); - - /* Set UPSMR */ - upsmr = in_be32(&uf_regs->upsmr); - upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | - UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) - upsmr |= UCC_GETH_UPSMR_RPM; - switch (ugeth->max_speed) { - case SPEED_10: - upsmr |= UCC_GETH_UPSMR_R10M; - fallthrough; - case SPEED_100: - if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) - upsmr |= UCC_GETH_UPSMR_RMM; - } - } - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - upsmr |= UCC_GETH_UPSMR_TBIM; - } - if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) - upsmr |= UCC_GETH_UPSMR_SGMM; - - out_be32(&uf_regs->upsmr, upsmr); - - /* Disable autonegotiation in tbi mode, because by default it - comes up in autonegotiation mode. */ - /* Note that this depends on proper setting in utbipar register. 
*/ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - struct ucc_geth_info *ug_info = ugeth->ug_info; - struct phy_device *tbiphy; - - if (!ug_info->tbi_node) - pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); - - tbiphy = of_phy_find_device(ug_info->tbi_node); - if (!tbiphy) - pr_warn("Could not get TBI device\n"); - - value = phy_read(tbiphy, ENET_TBI_MII_CR); - value &= ~0x1000; /* Turn off autonegotiation */ - phy_write(tbiphy, ENET_TBI_MII_CR, value); - - put_device(&tbiphy->mdio.dev); - } - - init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); - - ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); - if (ret_val != 0) { - if (netif_msg_probe(ugeth)) - pr_err("Preamble length must be between 3 and 7 inclusive\n"); - return ret_val; - } - - return 0; + return phy_interface_mode_is_rgmii(interface) || + interface == PHY_INTERFACE_MODE_RMII || + interface == PHY_INTERFACE_MODE_RTBI; } static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) @@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) /* allow to xmit again */ netif_tx_wake_all_queues(ugeth->ndev); - __netdev_watchdog_up(ugeth->ndev); -} - -/* Called every time the controller might need to be made - * aware of new link state. The PHY code conveys this - * information through variables in the ugeth structure, and this - * function converts those variables into the appropriate - * register values, and can bring down the device if needed. - */ - -static void adjust_link(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; - struct phy_device *phydev = ugeth->phydev; - int new_state = 0; - - ug_regs = ugeth->ug_regs; - uf_regs = ugeth->uccf->uf_regs; - - if (phydev->link) { - u32 tempval = in_be32(&ug_regs->maccfg2); - u32 upsmr = in_be32(&uf_regs->upsmr); - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode. */ - if (phydev->duplex != ugeth->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FDX); - else - tempval |= MACCFG2_FDX; - ugeth->oldduplex = phydev->duplex; - } - - if (phydev->speed != ugeth->oldspeed) { - new_state = 1; - switch (phydev->speed) { - case SPEED_1000: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_BYTE); - break; - case SPEED_100: - case SPEED_10: - tempval = ((tempval & - ~(MACCFG2_INTERFACE_MODE_MASK)) | - MACCFG2_INTERFACE_MODE_NIBBLE); - /* if reduced mode, re-set UPSMR.R10M */ - if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || - (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - if (phydev->speed == SPEED_10) - upsmr |= UCC_GETH_UPSMR_R10M; - else - upsmr &= ~UCC_GETH_UPSMR_R10M; - } - break; - default: - if (netif_msg_link(ugeth)) - pr_warn( - "%s: Ack! Speed (%d) is not 10/100/1000!", - dev->name, phydev->speed); - break; - } - ugeth->oldspeed = phydev->speed; - } - - if (!ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 1; - } - - if (new_state) { - /* - * To change the MAC configuration we need to disable - * the controller. 
To do so, we have to either grab - * ugeth->lock, which is a bad idea since 'graceful - * stop' commands might take quite a while, or we can - * quiesce driver's activity. - */ - ugeth_quiesce(ugeth); - ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - - out_be32(&ug_regs->maccfg2, tempval); - out_be32(&uf_regs->upsmr, upsmr); - - ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - ugeth_activate(ugeth); - } - } else if (ugeth->oldlink) { - new_state = 1; - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - } - - if (new_state && netif_msg_link(ugeth)) - phy_print_status(phydev); + netdev_watchdog_up(ugeth->ndev); } /* Initialize TBI PHY interface for communicating with the @@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev) struct phy_device *tbiphy; if (!ug_info->tbi_node) { - dev_warn(&dev->dev, "SGMII mode requires that the device " - "tree specify a tbi-handle\n"); + dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n"); return; } @@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev) put_device(&tbiphy->mdio.dev); } -/* Configure the PHY for dev. - * returns 0 if success. -1 if failure - */ -static int init_phy(struct net_device *dev) +static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) { - struct ucc_geth_private *priv = netdev_priv(dev); - struct ucc_geth_info *ug_info = priv->ug_info; - struct phy_device *phydev; + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; + struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs; + u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2); + u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr); - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; + old_maccfg2 = maccfg2; + old_upsmr = upsmr; - phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, - priv->phy_interface); - if (!phydev) { - dev_err(&dev->dev, "Could not attach to PHY\n"); - return -ENODEV; + /* No length check */ + maccfg2 &= ~MACCFG2_LC; + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | + UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); + + if (speed == SPEED_10 || speed == SPEED_100) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == SPEED_1000) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + + maccfg2 |= ug_info->padAndCrc; + + if (phy_interface_mode_is_reduced(interface)) { + + if (interface != PHY_INTERFACE_MODE_RMII) + upsmr |= UCC_GETH_UPSMR_RPM; + + switch (speed) { + case SPEED_10: + upsmr |= UCC_GETH_UPSMR_R10M; + fallthrough; + case SPEED_100: + if (interface != PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_RMM; + } } - if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) - uec_configure_serdes(dev); + if (interface == PHY_INTERFACE_MODE_TBI || + interface == PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_TBIM; - phy_set_max_speed(phydev, priv->max_speed); + if (interface == PHY_INTERFACE_MODE_SGMII) + upsmr |= UCC_GETH_UPSMR_SGMM; - priv->phydev = phydev; + if (duplex == DUPLEX_HALF) + maccfg2 &= ~(MACCFG2_FDX); + else + maccfg2 |= MACCFG2_FDX; - return 0; + if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) { + /* + * To change the MAC configuration we need to disable + * the controller. 
To do so, we have to either grab + * ugeth->lock, which is a bad idea since 'graceful + * stop' commands might take quite a while, or we can + * quiesce driver's activity. + */ + ugeth_quiesce(ugeth); + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + out_be32(&ug_regs->maccfg2, maccfg2); + out_be32(&uf_regs->upsmr, upsmr); + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + ugeth_activate(ugeth); + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + uec_configure_serdes(ndev); + + if (!phylink_autoneg_inband(mode)) { + ug_info->aufc = 0; + ug_info->receiveFlowControl = rx_pause; + ug_info->transmitFlowControl = tx_pause; + + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); +} + +static void ugeth_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + u16 value; + + if (state->interface == PHY_INTERFACE_MODE_TBI || + state->interface == PHY_INTERFACE_MODE_RTBI) { + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) + pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) + pr_warn("Could not get TBI device\n"); + + value = phy_read(tbiphy, ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->mdio.dev); + } + + if (phylink_autoneg_inband(mode)) { + ug_info->aufc = 1; + + init_flow_control_params(ug_info->aufc, 1, 1, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } } static void ugeth_dump_regs(struct ucc_geth_private *ugeth) @@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; - struct phy_device *phydev = ugeth->phydev; ugeth_vdbg("%s: IN", __func__); @@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) * Must be done before disabling the controller * or deadlock may happen. */ - phy_stop(phydev); + phylink_stop(ugeth->phylink); /* Disable the controller */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); @@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) goto err; } - err = adjust_enet_interface(ugeth); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); - goto err; - } - /* Set MACSTNADDR1, MACSTNADDR2 */ /* For more details see the hardware spec. 
*/ init_mac_station_addr_regs(dev->dev_addr[0], @@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) &ugeth->ug_regs->macstnaddr1, &ugeth->ug_regs->macstnaddr2); - err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); - if (err) { - netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); - goto err; - } - return 0; err: ucc_geth_stop(ugeth); @@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev) return -EINVAL; } - err = init_phy(dev); + err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0); if (err) { - netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); - return err; + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; } err = ucc_geth_init_mac(ugeth); @@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev) goto err; } - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); napi_enable(&ugeth->napi); netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, - qe_alive_during_sleep() || ugeth->phydev->irq); + qe_alive_during_sleep() || dev->phydev->irq); device_set_wakeup_enable(&dev->dev, ugeth->wol_en); return err; @@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev) cancel_work_sync(&ugeth->timeout_work); ucc_geth_stop(ugeth); - phy_disconnect(ugeth->phydev); - ugeth->phydev = NULL; + phylink_disconnect_phy(ugeth->phylink); free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); @@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work) ucc_geth_stop(ugeth); ucc_geth_init_mac(ugeth); /* Must start PHY here */ - phy_start(ugeth->phydev); + phylink_start(ugeth->phylink); netif_tx_start_all_queues(dev); } @@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); + bool mac_wol = false; if (!netif_running(ndev)) return 0; @@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - if (ugeth->wol_en & WAKE_MAGIC) { + if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) { setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); - } else if (!(ugeth->wol_en & WAKE_PHY)) { - phy_stop(ugeth->phydev); + mac_wol = true; } + rtnl_lock(); + phylink_suspend(ugeth->phylink, mac_wol); + rtnl_unlock(); + return 0; } @@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev) } } - ugeth->oldlink = 0; - ugeth->oldspeed = 0; - ugeth->oldduplex = -1; - - phy_stop(ugeth->phydev); - phy_start(ugeth->phydev); + rtnl_lock(); + phylink_resume(ugeth->phylink); + rtnl_unlock(); napi_enable(&ugeth->napi); netif_device_attach(ndev); @@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev) #define ucc_geth_resume NULL #endif -static phy_interface_t to_phy_interface(const char *phy_connection_type) -{ - if (strcasecmp(phy_connection_type, "mii") == 0) - return PHY_INTERFACE_MODE_MII; - if (strcasecmp(phy_connection_type, "gmii") == 0) - return PHY_INTERFACE_MODE_GMII; - if (strcasecmp(phy_connection_type, "tbi") == 0) - return PHY_INTERFACE_MODE_TBI; - if (strcasecmp(phy_connection_type, "rmii") == 0) - return PHY_INTERFACE_MODE_RMII; - if (strcasecmp(phy_connection_type, "rgmii") == 0) - return PHY_INTERFACE_MODE_RGMII; - if 
(strcasecmp(phy_connection_type, "rgmii-id") == 0) - return PHY_INTERFACE_MODE_RGMII_ID; - if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) - return PHY_INTERFACE_MODE_RGMII_TXID; - if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) - return PHY_INTERFACE_MODE_RGMII_RXID; - if (strcasecmp(phy_connection_type, "rtbi") == 0) - return PHY_INTERFACE_MODE_RTBI; - if (strcasecmp(phy_connection_type, "sgmii") == 0) - return PHY_INTERFACE_MODE_SGMII; - - return PHY_INTERFACE_MODE_MII; -} - static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct ucc_geth_private *ugeth = netdev_priv(dev); @@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; - if (!ugeth->phydev) - return -ENODEV; - - return phy_mii_ioctl(ugeth->phydev, rq, cmd); + return phylink_mii_ioctl(ugeth->phylink, rq, cmd); } static const struct net_device_ops ucc_geth_netdev_ops = { @@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_stop = ucc_geth_close, .ndo_start_xmit = ucc_geth_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, @@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which, return 0; } +struct phylink_mac_ops ugeth_mac_ops = { + .mac_link_up = ugeth_mac_link_up, + .mac_link_down = ugeth_mac_link_down, + .mac_config = ugeth_mac_config, +}; + static int ucc_geth_probe(struct platform_device* ofdev) { struct device *device = &ofdev->dev; @@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev) struct net_device *dev = NULL; struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; + struct device_node *phy_node; + struct phylink *phylink; struct resource res; - int err, ucc_num, max_speed = 0; + int err, ucc_num; const unsigned int *prop; phy_interface_t phy_interface; - static const int enet_to_speed[] = { - SPEED_10, SPEED_10, SPEED_10, - SPEED_100, SPEED_100, SPEED_100, - SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, - }; - static const phy_interface_t enet_to_phy_interface[] = { - PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, - PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, - PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, - PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, - PHY_INTERFACE_MODE_SGMII, - }; ugeth_vdbg("%s: IN", __func__); @@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); - ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { - /* - * In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - err = of_phy_register_fixed_link(np); - if (err) - return err; - ug_info->phy_node = of_node_get(np); - } - /* Find the TBI PHY node. 
If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); - /* get the phy interface type, or default to MII */ - prop = of_get_property(np, "phy-connection-type", NULL); - if (!prop) { - /* handle interface property present in old trees */ - prop = of_get_property(ug_info->phy_node, "interface", NULL); - if (prop != NULL) { - phy_interface = enet_to_phy_interface[*prop]; - max_speed = enet_to_speed[*prop]; - } else - phy_interface = PHY_INTERFACE_MODE_MII; - } else { - phy_interface = to_phy_interface((const char *)prop); - } - - /* get speed, or derive from PHY interface */ - if (max_speed == 0) - switch (phy_interface) { - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_TBI: - case PHY_INTERFACE_MODE_RTBI: - case PHY_INTERFACE_MODE_SGMII: - max_speed = SPEED_1000; - break; - default: - max_speed = SPEED_100; - break; + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (phy_node) { + prop = of_get_property(phy_node, "interface", NULL); + if (prop) { + dev_err(&ofdev->dev, + "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead."); + of_node_put(phy_node); + err = -EINVAL; + goto err_put_tbi; } + of_node_put(phy_node); + } + + err = of_get_phy_mode(np, &phy_interface); + if (err) { + dev_err(&ofdev->dev, "Invalid phy-connection-type"); + goto err_put_tbi; + } - if (max_speed == SPEED_1000) { + if (phy_interface == PHY_INTERFACE_MODE_GMII || + phy_interface_mode_is_rgmii(phy_interface) || + phy_interface == PHY_INTERFACE_MODE_TBI || + phy_interface == PHY_INTERFACE_MODE_RTBI || + phy_interface == PHY_INTERFACE_MODE_SGMII) { unsigned int snums = qe_get_num_of_snums(); - /* configure muram FIFOs for gigabit operation */ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; @@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth)); if (!dev) { err = -ENOMEM; - goto err_deregister_fixed_link; + goto err_put_tbi; } ugeth = netdev_priv(dev); @@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev->max_mtu = 1518; ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); - ugeth->phy_interface = phy_interface; - ugeth->max_speed = max_speed; - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(dev); + ugeth->phylink_config.dev = &dev->dev; + ugeth->phylink_config.type = PHYLINK_NETDEV; + + ugeth->phylink_config.mac_capabilities = + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + __set_bit(PHY_INTERFACE_MODE_MII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + ugeth->phylink_config.supported_interfaces); + phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces); + + if (ug_info->tbi_node) { + __set_bit(PHY_INTERFACE_MODE_SGMII, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_TBI, + ugeth->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RTBI, + ugeth->phylink_config.supported_interfaces); + } + + phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev), + phy_interface, &ugeth_mac_ops); + if (IS_ERR(phylink)) { + err = 
PTR_ERR(phylink); + goto err_put_tbi; + } + + ugeth->phylink = phylink; err = devm_register_netdev(&ofdev->dev, dev); if (err) { if (netif_msg_probe(ugeth)) pr_err("%s: Cannot register net device, aborting\n", dev->name); - goto err_deregister_fixed_link; + goto err_destroy_phylink; } err = of_get_ethdev_address(np, dev); if (err == -EPROBE_DEFER) - goto err_deregister_fixed_link; + goto err_destroy_phylink; ugeth->ug_info = ug_info; ugeth->dev = device; @@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev) return 0; -err_deregister_fixed_link: - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); +err_destroy_phylink: + phylink_destroy(phylink); +err_put_tbi: of_node_put(ug_info->tbi_node); - of_node_put(ug_info->phy_node); + return err; } @@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); - struct device_node *np = ofdev->dev.of_node; ucc_geth_memclean(ugeth); - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); + phylink_destroy(ugeth->phylink); of_node_put(ugeth->ug_info->tbi_node); - of_node_put(ugeth->ug_info->phy_node); } static const struct of_device_id ucc_geth_match[] = { diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 4294ed096ebb..38789faae706 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/list.h> +#include <linux/phylink.h> #include <linux/if_ether.h> #include <soc/fsl/qe/immap_qe.h> @@ -921,7 +922,8 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 #define UCC_GETH_MACCFG1_INIT 0 -#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \ + (7 << MACCFG2_PREL_SHIFT)) /* Ethernet Address Type. 
*/ enum enet_addr_type { @@ -1073,6 +1075,9 @@ struct ucc_geth_tad_params { u16 vid; }; +struct phylink; +struct phylink_config; + /* GETH protocol initialization structure */ struct ucc_geth_info { struct ucc_fast_info uf_info; @@ -1088,7 +1093,6 @@ struct ucc_geth_info { u8 miminumInterFrameGapEnforcement; u8 backToBackInterFrameGap; int ipAddressAlignment; - int lengthCheckRx; u32 mblinterval; u16 nortsrbytetime; u8 fracsiz; @@ -1114,7 +1118,6 @@ struct ucc_geth_info { int transmitFlowControl; u8 maxGroupAddrInHash; u8 maxIndAddrInHash; - u8 prel; u16 maxFrameLength; u16 minFrameLength; u16 maxD1Length; @@ -1125,7 +1128,6 @@ struct ucc_geth_info { u32 eventRegMask; u16 pausePeriod; u16 extensionField; - struct device_node *phy_node; struct device_node *tbi_node; u8 weightfactor[NUM_TX_QUEUES]; u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; @@ -1210,14 +1212,12 @@ struct ucc_geth_private { u16 skb_dirtytx[NUM_TX_QUEUES]; struct ugeth_mii_info *mii_info; - struct phy_device *phydev; - phy_interface_t phy_interface; - int max_speed; uint32_t msg_enable; - int oldspeed; - int oldduplex; - int oldlink; - int wol_en; + u32 wol_en; + u32 phy_wol_en; + + struct phylink *phylink; + struct phylink_config phylink_config; struct device_node *node; }; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 699f346faf5c..1fb49e5a414a 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -104,14 +104,8 @@ static int uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (!phydev) - return -ENODEV; - - phy_ethtool_ksettings_get(phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(ugeth->phylink, cmd); } static int @@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (!phydev) - return -ENODEV; - - return phy_ethtool_ksettings_set(phydev, cmd); + return phylink_ethtool_ksettings_set(ugeth->phylink, cmd); } static void @@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev, { struct ucc_geth_private *ugeth = netdev_priv(netdev); - pause->autoneg = ugeth->phydev->autoneg; - - if (ugeth->ug_info->receiveFlowControl) - pause->rx_pause = 1; - if (ugeth->ug_info->transmitFlowControl) - pause->tx_pause = 1; + return phylink_ethtool_get_pauseparam(ugeth->phylink, pause); } static int @@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - int ret = 0; ugeth->ug_info->receiveFlowControl = pause->rx_pause; ugeth->ug_info->transmitFlowControl = pause->tx_pause; - if (ugeth->phydev->autoneg) { - if (netif_running(netdev)) { - /* FIXME: automatically restart */ - netdev_info(netdev, "Please re-open the interface\n"); - } - } else { - struct ucc_geth_info *ug_info = ugeth->ug_info; - - ret = init_flow_control_params(ug_info->aufc, - ug_info->receiveFlowControl, - ug_info->transmitFlowControl, - ug_info->pausePeriod, - ug_info->extensionField, - &ugeth->uccf->uf_regs->upsmr, - &ugeth->ug_regs->uempr, - &ugeth->ug_regs->maccfg1); - } - - return ret; + return phylink_ethtool_set_pauseparam(ugeth->phylink, pause); } static uint32_t @@ -343,28 +309,42 @@ uec_get_drvinfo(struct 
net_device *netdev, static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; - if (phydev && phydev->irq) - wol->supported |= WAKE_PHY; + phylink_ethtool_get_wol(ugeth->phylink, wol); + if (qe_alive_during_sleep()) wol->supported |= WAKE_MAGIC; - wol->wolopts = ugeth->wol_en; + wol->wolopts |= ugeth->wol_en; } static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); - struct phy_device *phydev = ugeth->phydev; + int ret = 0; - if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) - return -EINVAL; - else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) + ret = phylink_ethtool_set_wol(ugeth->phylink, wol); + if (ret == -EOPNOTSUPP) { + ugeth->phy_wol_en = 0; + } else if (ret) { + return ret; + } else { + ugeth->phy_wol_en = wol->wolopts; + goto out; + } + + /* If the PHY isn't handling the WoL and the MAC is asked to more than + * WAKE_MAGIC, error-out + */ + if (!ugeth->phy_wol_en && + wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) + + if (wol->wolopts & WAKE_MAGIC && + !qe_alive_during_sleep()) return -EINVAL; +out: ugeth->wol_en = wol->wolopts; device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index dd92949bb214..78d2a19593d1 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1116,6 +1116,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) return gve_xdp_tx_queue_id(priv, 0); } +static inline bool gve_supports_xdp_xmit(struct gve_priv *priv) +{ + switch (priv->queue_format) { + case GVE_GQI_QPL_FORMAT: + return true; + default: + return false; + } +} + /* gqi napi handler defined in gve_main.c */ int gve_napi_poll(struct napi_struct *napi, int budget); @@ -1140,6 +1150,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); bool gve_tx_poll(struct gve_notify_block *block, int budget); bool gve_xdp_poll(struct gve_notify_block *block, int budget); +int gve_xsk_tx_poll(struct gve_notify_block *block, int budget); int gve_tx_alloc_rings_gqi(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg); void gve_tx_free_rings_gqi(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 060e0e674938..aa7d723011d0 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -1128,20 +1128,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id) return gve_adminq_execute_cmd(priv, &cmd); } -int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu) -{ - union gve_adminq_command cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER); - cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) { - .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU), - .parameter_value = cpu_to_be64(mtu), - }; - - return gve_adminq_execute_cmd(priv, &cmd); -} - int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, dma_addr_t stats_report_addr, u64 interval) { diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 863683de9694..228217458275 100644 --- 
a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_register_page_list(struct gve_priv *priv, struct gve_queue_page_list *qpl); int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id); -int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu); int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, dma_addr_t stats_report_addr, u64 interval); int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index e171ca248f9a..92237fb0b60c 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget) if (block->rx) { work_done = gve_rx_poll(block, budget); + + /* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of + * TX and RX work done. + */ + if (priv->xdp_prog) + work_done = max_t(int, work_done, + gve_xsk_tx_poll(block, budget)); + reschedule |= work_done == budget; } @@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv) static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg) { + int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0; + cfg->qcfg = &priv->tx_cfg; cfg->raw_addressing = !gve_is_qpl(priv); cfg->ring_size = priv->tx_desc_cnt; cfg->start_idx = 0; - cfg->num_rings = gve_num_tx_queues(priv); + cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues; cfg->tx = priv->tx; } @@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev, if (err) return err; - /* If XDP prog is not installed, return */ - if (!priv->xdp_prog) + /* If XDP prog is not installed or interface is down, return. */ + if (!priv->xdp_prog || !netif_running(dev)) return 0; rx = &priv->rx[qid]; @@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev, if (qid >= priv->rx_cfg.num_queues) return -EINVAL; - /* If XDP prog is not installed, unmap DMA and return */ - if (!priv->xdp_prog) - goto done; - - tx_qid = gve_xdp_tx_queue_id(priv, qid); - if (!netif_running(dev)) { - priv->rx[qid].xsk_pool = NULL; - xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); - priv->tx[tx_qid].xsk_pool = NULL; + /* If XDP prog is not installed or interface is down, unmap DMA and + * return. 
+ */ + if (!priv->xdp_prog || !netif_running(dev)) goto done; - } napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; napi_disable(napi_rx); /* make sure current rx poll is done */ + tx_qid = gve_xdp_tx_queue_id(priv, qid); napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; napi_disable(napi_tx); /* make sure current tx poll is done */ @@ -1709,24 +1714,20 @@ done: static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct gve_priv *priv = netdev_priv(dev); - int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id); + struct napi_struct *napi; + + if (!gve_get_napi_enabled(priv)) + return -ENETDOWN; if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) return -EINVAL; - if (flags & XDP_WAKEUP_TX) { - struct gve_tx_ring *tx = &priv->tx[tx_queue_id]; - struct napi_struct *napi = - &priv->ntfy_blocks[tx->ntfy_id].napi; - - if (!napi_if_scheduled_mark_missed(napi)) { - /* Call local_bh_enable to trigger SoftIRQ processing */ - local_bh_disable(); - napi_schedule(napi); - local_bh_enable(); - } - - tx->xdp_xsk_wakeup++; + napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi; + if (!napi_if_scheduled_mark_missed(napi)) { + /* Call local_bh_enable to trigger SoftIRQ processing */ + local_bh_disable(); + napi_schedule(napi); + local_bh_enable(); } return 0; @@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv, { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; + int num_xdp_queues; int err; gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); @@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv, rx_alloc_cfg.qcfg = &new_rx_config; tx_alloc_cfg.num_rings = new_tx_config.num_queues; + /* Add dedicated XDP TX queues if enabled. */ + num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0; + tx_alloc_cfg.num_rings += num_xdp_queues; + if (netif_running(priv->dev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; @@ -1897,8 +1903,13 @@ static void gve_turndown(struct gve_priv *priv) /* Stop tx queues */ netif_tx_disable(priv->dev); + xdp_features_clear_redirect_target(priv->dev); + gve_clear_napi_enabled(priv); gve_clear_report_stats(priv); + + /* Make sure that all traffic is finished processing. 
*/ + synchronize_net(); } static void gve_turnup(struct gve_priv *priv) @@ -1963,6 +1974,9 @@ static void gve_turnup(struct gve_priv *priv) napi_schedule(&block->napi); } + if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv)) + xdp_features_set_redirect_target(priv->dev, false); + gve_set_napi_enabled(priv); } @@ -2232,14 +2246,17 @@ static void gve_service_task(struct work_struct *work) static void gve_set_netdev_xdp_features(struct gve_priv *priv) { + xdp_features_t xdp_features; + if (priv->queue_format == GVE_GQI_QPL_FORMAT) { - priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC; - priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; - priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; - priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + xdp_features = NETDEV_XDP_ACT_BASIC; + xdp_features |= NETDEV_XDP_ACT_REDIRECT; + xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; } else { - priv->dev->xdp_features = 0; + xdp_features = 0; } + + xdp_set_features_flag(priv->dev, xdp_features); } static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index e7fb7d6d283d..4350ebd9c2bd 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx) return; gve_remove_napi(priv, ntfy_idx); - gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); + if (tx->q_num < priv->tx_cfg.num_queues) + gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); + else + gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); netdev_tx_reset_queue(tx->netdev_txq); gve_tx_remove_from_block(priv, idx); } @@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct gve_tx_ring *tx; int i, err = 0, qid; - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog) return -EINVAL; + if (!gve_get_napi_enabled(priv)) + return -ENETDOWN; + qid = gve_xdp_tx_queue_id(priv, smp_processor_id() % priv->num_xdp_queues); @@ -975,33 +981,41 @@ out: return sent; } +int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget) +{ + struct gve_rx_ring *rx = rx_block->rx; + struct gve_priv *priv = rx->gve; + struct gve_tx_ring *tx; + int sent = 0; + + tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)]; + if (tx->xsk_pool) { + sent = gve_xsk_tx(priv, tx, budget); + + u64_stats_update_begin(&tx->statss); + tx->xdp_xsk_sent += sent; + u64_stats_update_end(&tx->statss); + if (xsk_uses_need_wakeup(tx->xsk_pool)) + xsk_set_tx_need_wakeup(tx->xsk_pool); + } + + return sent; +} + bool gve_xdp_poll(struct gve_notify_block *block, int budget) { struct gve_priv *priv = block->priv; struct gve_tx_ring *tx = block->tx; u32 nic_done; - bool repoll; u32 to_do; /* Find out how much work there is to be done */ nic_done = gve_tx_load_event_counter(priv, tx); to_do = min_t(u32, (nic_done - tx->done), budget); gve_clean_xdp_done(priv, tx, to_do); - repoll = nic_done != tx->done; - - if (tx->xsk_pool) { - int sent = gve_xsk_tx(priv, tx, budget); - - u64_stats_update_begin(&tx->statss); - tx->xdp_xsk_sent += sent; - u64_stats_update_end(&tx->statss); - repoll |= (sent == budget); - if (xsk_uses_need_wakeup(tx->xsk_pool)) - xsk_set_tx_need_wakeup(tx->xsk_pool); - } /* If we still have work we want to repoll */ - return repoll; + return nic_done != tx->done; } bool gve_tx_poll(struct gve_notify_block *block, int budget) diff --git 
a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index f879426cb552..394debc62268 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -1146,8 +1146,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv, /* jiffies can wraparound but time comparisons can handle overflows. */ pending_packet->timeout_jiffies = jiffies + - msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT * - MSEC_PER_SEC); + secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT); add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet); *bytes += pending_packet->skb->len; @@ -1191,8 +1190,7 @@ static void remove_miss_completions(struct gve_priv *priv, pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL; pending_packet->timeout_jiffies = jiffies + - msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT * - MSEC_PER_SEC); + secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT); /* Maintain pending packet in another list so the packet can be * unallocated at a later time. */ diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile index ae58ac38c206..7ea15f9ef849 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile +++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_HIBMCGE) += hibmcge.o -hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o +hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \ + hbg_debugfs.o hbg_err.o diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index 96daf058d387..b4300d8ea4ad 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -4,6 +4,7 @@ #ifndef __HBG_COMMON_H #define __HBG_COMMON_H +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/pci.h> #include "hbg_reg.h" @@ -33,6 +34,14 @@ enum hbg_tx_state { enum hbg_nic_state { HBG_NIC_STATE_EVENT_HANDLING = 0, + HBG_NIC_STATE_RESETTING, + HBG_NIC_STATE_RESET_FAIL, +}; + +enum hbg_reset_type { + HBG_RESET_TYPE_NONE = 0, + HBG_RESET_TYPE_FLR, + HBG_RESET_TYPE_FUNCTION, }; struct hbg_buffer { @@ -84,6 +93,7 @@ struct hbg_dev_specs { u32 vlan_layers; u32 max_mtu; u32 min_mtu; + u32 uc_mac_num; u32 max_frame_len; u32 rx_buf_size; @@ -114,6 +124,22 @@ struct hbg_mac { u32 duplex; u32 autoneg; u32 link_status; + u32 pause_autoneg; +}; + +struct hbg_mac_table_entry { + u8 addr[ETH_ALEN]; +}; + +struct hbg_mac_filter { + struct hbg_mac_table_entry *mac_table; + u32 table_max_len; + bool enabled; +}; + +/* saved for restore after rest */ +struct hbg_user_def { + struct ethtool_pauseparam pause_param; }; struct hbg_priv { @@ -126,6 +152,9 @@ struct hbg_priv { struct hbg_vector vectors; struct hbg_ring tx_ring; struct hbg_ring rx_ring; + struct hbg_mac_filter filter; + enum hbg_reset_type reset_type; + struct hbg_user_def user_def; }; #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c new file mode 100644 index 000000000000..8473c43d171a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. 
+ +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/seq_file.h> +#include <linux/string_choices.h> +#include "hbg_common.h" +#include "hbg_debugfs.h" +#include "hbg_hw.h" +#include "hbg_irq.h" +#include "hbg_txrx.h" + +static struct dentry *hbg_dbgfs_root; + +struct hbg_dbg_info { + const char *name; + int (*read)(struct seq_file *seq, void *data); +}; + +#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state)) + +static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring, + struct seq_file *s) +{ + u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B : + HBG_INT_MSK_RX_B; + + seq_printf(s, "ring used num: %u\n", + hbg_get_queue_used_num(ring)); + seq_printf(s, "ring max num: %u\n", ring->len); + seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail); + seq_printf(s, "fifo used num: %u\n", + hbg_hw_get_fifo_used_num(priv, ring->dir)); + seq_printf(s, "fifo max num: %u\n", + hbg_get_spec_fifo_max_num(priv, ring->dir)); + seq_printf(s, "irq enabled: %s\n", + str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask))); +} + +static int hbg_dbg_tx_ring(struct seq_file *s, void *unused) +{ + struct net_device *netdev = dev_get_drvdata(s->private); + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_dbg_ring(priv, &priv->tx_ring, s); + return 0; +} + +static int hbg_dbg_rx_ring(struct seq_file *s, void *unused) +{ + struct net_device *netdev = dev_get_drvdata(s->private); + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_dbg_ring(priv, &priv->rx_ring, s); + return 0; +} + +static int hbg_dbg_irq_info(struct seq_file *s, void *unused) +{ + struct net_device *netdev = dev_get_drvdata(s->private); + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_irq_info *info; + u32 i; + + for (i = 0; i < priv->vectors.info_array_len; i++) { + info = &priv->vectors.info_array[i]; + seq_printf(s, + "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n", + info->name, + str_true_false(hbg_hw_irq_is_enabled(priv, + info->mask)), + str_true_false(info->need_print), + info->count); + } + + return 0; +} + +static int hbg_dbg_mac_table(struct seq_file *s, void *unused) +{ + struct net_device *netdev = dev_get_drvdata(s->private); + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_mac_filter *filter; + u32 i; + + filter = &priv->filter; + seq_printf(s, "mac addr max count: %u\n", filter->table_max_len); + seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled)); + + for (i = 0; i < filter->table_max_len; i++) { + if (is_zero_ether_addr(filter->mac_table[i].addr)) + continue; + + seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr); + } + + return 0; +} + +static const char * const reset_type_str[] = {"None", "FLR", "Function"}; + +static int hbg_dbg_nic_state(struct seq_file *s, void *unused) +{ + struct net_device *netdev = dev_get_drvdata(s->private); + struct hbg_priv *priv = netdev_priv(netdev); + + seq_printf(s, "event handling state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING)); + seq_printf(s, "resetting state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_RESETTING)); + seq_printf(s, "reset fail state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL)); + seq_printf(s, "last reset type: %s\n", + reset_type_str[priv->reset_type]); + + return 0; +} + +static const struct hbg_dbg_info hbg_dbg_infos[] = { + { "tx_ring", hbg_dbg_tx_ring }, + { "rx_ring", hbg_dbg_rx_ring }, + { "irq_info", hbg_dbg_irq_info }, + { "mac_table", hbg_dbg_mac_table 
}, + { "nic_state", hbg_dbg_nic_state }, +}; + +static void hbg_debugfs_uninit(void *data) +{ + debugfs_remove_recursive((struct dentry *)data); +} + +void hbg_debugfs_init(struct hbg_priv *priv) +{ + const char *name = pci_name(priv->pdev); + struct device *dev = &priv->pdev->dev; + struct dentry *root; + u32 i; + + root = debugfs_create_dir(name, hbg_dbgfs_root); + + for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++) + debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name, + root, hbg_dbg_infos[i].read); + + /* Ignore the failure because debugfs is not a key feature. */ + devm_add_action_or_reset(dev, hbg_debugfs_uninit, root); +} + +void hbg_debugfs_register(void) +{ + hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL); +} + +void hbg_debugfs_unregister(void) +{ + debugfs_remove_recursive(hbg_dbgfs_root); + hbg_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h new file mode 100644 index 000000000000..80670d66bbeb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_DEBUGFS_H +#define __HBG_DEBUGFS_H + +void hbg_debugfs_register(void); +void hbg_debugfs_unregister(void); + +void hbg_debugfs_init(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c new file mode 100644 index 000000000000..4d1f4a33391a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/rtnetlink.h> +#include "hbg_common.h" +#include "hbg_err.h" +#include "hbg_hw.h" + +static void hbg_restore_mac_table(struct hbg_priv *priv) +{ + struct hbg_mac_filter *filter = &priv->filter; + u64 addr; + u32 i; + + for (i = 0; i < filter->table_max_len; i++) + if (!is_zero_ether_addr(filter->mac_table[i].addr)) { + addr = ether_addr_to_u64(filter->mac_table[i].addr); + hbg_hw_set_uc_addr(priv, addr, i); + } + + hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled); +} + +static void hbg_restore_user_def_settings(struct hbg_priv *priv) +{ + struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param; + + hbg_restore_mac_table(priv); + hbg_hw_set_mtu(priv, priv->netdev->mtu); + hbg_hw_set_pause_enable(priv, pause_param->tx_pause, + pause_param->rx_pause); +} + +int hbg_rebuild(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_init(priv); + if (ret) + return ret; + + hbg_restore_user_def_settings(priv); + return 0; +} + +static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) +{ + int ret; + + ASSERT_RTNL(); + + if (netif_running(priv->netdev)) { + dev_warn(&priv->pdev->dev, + "failed to reset because port is up\n"); + return -EBUSY; + } + + priv->reset_type = type; + set_bit(HBG_NIC_STATE_RESETTING, &priv->state); + clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); + if (ret) { + set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); + } + + return ret; +} + +static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) +{ + int ret; + + if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) || + type != priv->reset_type) + return 0; + + ASSERT_RTNL(); + 
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); + ret = hbg_rebuild(priv); + if (ret) { + set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + dev_err(&priv->pdev->dev, "failed to rebuild after reset\n"); + return ret; + } + + dev_info(&priv->pdev->dev, "reset done\n"); + return ret; +} + +/* must be protected by rtnl lock */ +int hbg_reset(struct hbg_priv *priv) +{ + int ret; + + ASSERT_RTNL(); + ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION); + if (ret) + return ret; + + return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION); +} + +static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + rtnl_lock(); + hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR); +} + +static void hbg_pci_err_reset_done(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_reset_done(priv, HBG_RESET_TYPE_FLR); + rtnl_unlock(); +} + +static const struct pci_error_handlers hbg_pci_err_handler = { + .reset_prepare = hbg_pci_err_reset_prepare, + .reset_done = hbg_pci_err_reset_done, +}; + +void hbg_set_pci_err_handler(struct pci_driver *pdrv) +{ + pdrv->err_handler = &hbg_pci_err_handler; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h new file mode 100644 index 000000000000..d7828e446308 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_ERR_H +#define __HBG_ERR_H + +#include <linux/pci.h> + +void hbg_set_pci_err_handler(struct pci_driver *pdrv); +int hbg_reset(struct hbg_priv *priv); +int hbg_rebuild(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index c3370114aef3..00364a438ec2 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -3,12 +3,193 @@ #include <linux/ethtool.h> #include <linux/phy.h> +#include <linux/rtnetlink.h> +#include "hbg_common.h" +#include "hbg_err.h" #include "hbg_ethtool.h" +#include "hbg_hw.h" + +enum hbg_reg_dump_type { + HBG_DUMP_REG_TYPE_SPEC = 0, + HBG_DUMP_REG_TYPE_MDIO, + HBG_DUMP_REG_TYPE_GMAC, + HBG_DUMP_REG_TYPE_PCU, +}; + +struct hbg_reg_info { + u32 type; + u32 offset; + u32 val; +}; + +#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0} +#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0} +#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0} +#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0} + +static const struct hbg_reg_info hbg_dump_reg_infos[] = { + /* dev specs */ + HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR), + + /* mdio */ + HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR), + 
HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR), + + /* gmac */ + HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR), + + /* pcu */ + HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR), + HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR), + HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR), + HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR), +}; + +static const u32 hbg_dump_type_base_array[] = { + [HBG_DUMP_REG_TYPE_SPEC] = 0, + [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE, + [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE, + [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE, +}; + +static int hbg_ethtool_get_regs_len(struct net_device *netdev) +{ + return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info); +} + +static void hbg_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_reg_info *info; + u32 i, offset = 0; + + regs->version = 0; + for (i 
= 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) { + info = data + offset; + + *info = hbg_dump_reg_infos[i]; + info->val = hbg_reg_read(priv, info->offset); + info->offset -= hbg_dump_type_base_array[info->type]; + + offset += sizeof(*info); + } +} + +static void hbg_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *param) +{ + struct hbg_priv *priv = netdev_priv(net_dev); + + param->autoneg = priv->mac.pause_autoneg; + hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause); +} + +static int hbg_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *param) +{ + struct hbg_priv *priv = netdev_priv(net_dev); + + priv->mac.pause_autoneg = param->autoneg; + phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause); + + if (!param->autoneg) + hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause); + + priv->user_def.pause_param = *param; + return 0; +} + +static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (*flags != ETH_RESET_DEDICATED) + return -EOPNOTSUPP; + + *flags = 0; + return hbg_reset(priv); +} static const struct ethtool_ops hbg_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_regs_len = hbg_ethtool_get_regs_len, + .get_regs = hbg_ethtool_get_regs, + .get_pauseparam = hbg_ethtool_get_pauseparam, + .set_pauseparam = hbg_ethtool_set_pauseparam, + .reset = hbg_ethtool_reset, + .nway_reset = phy_ethtool_nway_reset, }; void hbg_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index 05295c2ad439..e7798f213645 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -3,6 +3,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/if_vlan.h> #include <linux/iopoll.h> #include <linux/minmax.h> #include "hbg_common.h" @@ -67,6 +68,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv) specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR); specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR); specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR); + specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR); + mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR); u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data); @@ -135,9 +138,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable) hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value); } -void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr) +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index) { - hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr); + u32 addr; + + /* mac addr is u64, so the addr offset is 0x8 */ + addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8); + hbg_reg_write64(priv, addr, mac_addr); } static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv, @@ -161,8 +168,13 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv, void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu) { - hbg_hw_set_pcu_max_frame_len(priv, mtu); - hbg_hw_set_mac_max_frame_len(priv, mtu); + u32 frame_len; + + frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers + + ETH_HLEN + ETH_FCS_LEN; + + hbg_hw_set_pcu_max_frame_len(priv, frame_len); +
hbg_hw_set_mac_max_frame_len(priv, frame_len); } void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable) @@ -207,6 +219,34 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) HBG_REG_DUPLEX_B, duplex); } +/* only support uc filter */ +void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable) +{ + hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR, + HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable); +} + +void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en) +{ + hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_TX_B, tx_en); + hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_RX_B, rx_en); +} + +void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en) +{ + *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_TX_B); + *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_RX_B); +} + +void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr) +{ + hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr); +} + static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv) { u32 ctrl = 0; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h index 14fb39241c93..a4a049b5121d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h @@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask); void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable); void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu); void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable); -void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr); +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index); u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir); void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc); void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr); +void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable); +void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en); +void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en); +void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr); #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 75505fb5cc4a..bb0f25ac9760 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -6,13 +6,13 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include "hbg_common.h" +#include "hbg_err.h" #include "hbg_ethtool.h" #include "hbg_hw.h" #include "hbg_irq.h" #include "hbg_mdio.h" #include "hbg_txrx.h" - -static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu); +#include "hbg_debugfs.h" static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) { @@ -55,11 +55,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv) return ret; /* After reset, regs need to be reconfigured */ - hbg_hw_init(priv); - hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr)); - hbg_change_mtu(priv, priv->netdev->mtu); - - return 0; + return hbg_rebuild(priv); } static int hbg_net_stop(struct net_device *netdev) @@ -74,31 +70,127 @@ static int hbg_net_stop(struct net_device *netdev) return hbg_hw_txrx_clear(priv); } +static void hbg_update_promisc_mode(struct 
net_device *netdev, bool overflow) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + /* Only when not table_overflow, and netdev->flags not set IFF_PROMISC, + * The MAC filter will be enabled. + * Otherwise the filter will be disabled. + */ + priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC)); + hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled); +} + +static void hbg_set_mac_to_mac_table(struct hbg_priv *priv, + u32 index, const u8 *addr) +{ + if (addr) { + ether_addr_copy(priv->filter.mac_table[index].addr, addr); + hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index); + } else { + eth_zero_addr(priv->filter.mac_table[index].addr); + hbg_hw_set_uc_addr(priv, 0, index); + } +} + +static int hbg_get_index_from_mac_table(struct hbg_priv *priv, + const u8 *addr, u32 *index) +{ + u32 i; + + for (i = 0; i < priv->filter.table_max_len; i++) + if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) { + *index = i; + return 0; + } + + return -EINVAL; +} + +static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr) +{ + u32 index; + + /* already exists */ + if (!hbg_get_index_from_mac_table(priv, addr, &index)) + return 0; + + for (index = 0; index < priv->filter.table_max_len; index++) + if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) { + hbg_set_mac_to_mac_table(priv, index, addr); + return 0; + } + + return -ENOSPC; +} + +static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr) +{ + u32 index; + + /* not exists */ + if (hbg_get_index_from_mac_table(priv, addr, &index)) + return; + + hbg_set_mac_to_mac_table(priv, index, NULL); +} + +static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + return hbg_add_mac_to_filter(priv, addr); +} + +static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (ether_addr_equal(netdev->dev_addr, (u8 *)addr)) + return 0; + + hbg_del_mac_from_filter(priv, addr); + return 0; +} + +static void hbg_net_set_rx_mode(struct net_device *netdev) +{ + int ret; + + ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync); + + /* If ret != 0, overflow has occurred */ + hbg_update_promisc_mode(netdev, !!ret); +} + static int hbg_net_set_mac_address(struct net_device *netdev, void *addr) { struct hbg_priv *priv = netdev_priv(netdev); u8 *mac_addr; + bool exists; + u32 index; mac_addr = ((struct sockaddr *)addr)->sa_data; if (!is_valid_ether_addr(mac_addr)) return -EADDRNOTAVAIL; - hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr)); - dev_addr_set(netdev, mac_addr); + /* The index of host mac is always 0. + * If new mac address already exists, + * delete the existing mac address and + * add it to the position with index 0. 
+ */ + exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index); + hbg_set_mac_to_mac_table(priv, 0, mac_addr); + if (exists) + hbg_set_mac_to_mac_table(priv, index, NULL); + hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr)); + dev_addr_set(netdev, mac_addr); return 0; } -static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu) -{ - u32 frame_len; - - frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers + - ETH_HLEN + ETH_FCS_LEN; - hbg_hw_set_mtu(priv, frame_len); -} - static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu) { struct hbg_priv *priv = netdev_priv(netdev); @@ -106,7 +198,7 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) return -EBUSY; - hbg_change_mtu(priv, new_mtu); + hbg_hw_set_mtu(priv, new_mtu); WRITE_ONCE(netdev->mtu, new_mtu); dev_dbg(&priv->pdev->dev, @@ -142,8 +234,39 @@ static const struct net_device_ops hbg_netdev_ops = { .ndo_set_mac_address = hbg_net_set_mac_address, .ndo_change_mtu = hbg_net_change_mtu, .ndo_tx_timeout = hbg_net_tx_timeout, + .ndo_set_rx_mode = hbg_net_set_rx_mode, }; +static int hbg_mac_filter_init(struct hbg_priv *priv) +{ + struct hbg_dev_specs *dev_specs = &priv->dev_specs; + struct hbg_mac_filter *filter = &priv->filter; + struct hbg_mac_table_entry *tmp_table; + + tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num, + sizeof(*tmp_table), GFP_KERNEL); + if (!tmp_table) + return -ENOMEM; + + filter->mac_table = tmp_table; + filter->table_max_len = dev_specs->uc_mac_num; + filter->enabled = true; + + hbg_hw_set_mac_filter_enable(priv, filter->enabled); + return 0; +} + +static void hbg_init_user_def(struct hbg_priv *priv) +{ + struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param; + + priv->mac.pause_autoneg = HBG_STATUS_ENABLE; + + pause_param->autoneg = priv->mac.pause_autoneg; + hbg_hw_get_pause_enable(priv, &pause_param->tx_pause, + &pause_param->rx_pause); +} + static int hbg_init(struct hbg_priv *priv) { int ret; @@ -160,7 +283,17 @@ static int hbg_init(struct hbg_priv *priv) if (ret) return ret; - return hbg_mdio_init(priv); + ret = hbg_mdio_init(priv); + if (ret) + return ret; + + ret = hbg_mac_filter_init(priv); + if (ret) + return ret; + + hbg_debugfs_init(priv); + hbg_init_user_def(priv); + return 0; } static int hbg_pci_init(struct pci_dev *pdev) @@ -216,13 +349,15 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; netdev->max_mtu = priv->dev_specs.max_mtu; netdev->min_mtu = priv->dev_specs.min_mtu; netdev->netdev_ops = &hbg_netdev_ops; netdev->watchdog_timeo = 5 * HZ; - hbg_change_mtu(priv, ETH_DATA_LEN); + hbg_hw_set_mtu(priv, ETH_DATA_LEN); hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr); hbg_ethtool_set_ops(netdev); @@ -245,7 +380,27 @@ static struct pci_driver hbg_driver = { .id_table = hbg_pci_tbl, .probe = hbg_probe, }; -module_pci_driver(hbg_driver); + +static int __init hbg_module_init(void) +{ + int ret; + + hbg_debugfs_register(); + hbg_set_pci_err_handler(&hbg_driver); + ret = pci_register_driver(&hbg_driver); + if (ret) + hbg_debugfs_unregister(); + + return ret; +} +module_init(hbg_module_init); + +static void __exit hbg_module_exit(void) +{ + pci_unregister_driver(&hbg_driver); + hbg_debugfs_unregister(); +} +module_exit(hbg_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c index a3479fba8501..db6bc4cfb971 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -114,6 +114,19 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv) hbg_mdio_set_command(mac, cmd); } +static void hbg_flowctrl_cfg(struct hbg_priv *priv) +{ + struct phy_device *phydev = priv->mac.phydev; + bool rx_pause; + bool tx_pause; + + if (!priv->mac.pause_autoneg) + return; + + phy_get_pause(phydev, &tx_pause, &rx_pause); + hbg_hw_set_pause_enable(priv, tx_pause, rx_pause); +} + static void hbg_phy_adjust_link(struct net_device *netdev) { struct hbg_priv *priv = netdev_priv(netdev); @@ -140,6 +153,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev) priv->mac.duplex = phydev->duplex; priv->mac.autoneg = phydev->autoneg; hbg_hw_adjust_link(priv, speed, phydev->duplex); + hbg_flowctrl_cfg(priv); } priv->mac.link_status = phydev->link; @@ -168,6 +182,7 @@ static int hbg_phy_connect(struct hbg_priv *priv) return ret; phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_asym_pause(phydev); phy_attached_info(phydev); return 0; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index 57d81c6d7633..f12efc12f3c5 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -10,6 +10,8 @@ #define HBG_REG_MAC_ID_ADDR 0x0008 #define HBG_REG_PHY_ID_ADDR 0x000C #define HBG_REG_MAC_ADDR_ADDR 0x0010 +#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014 +#define HBG_REG_UC_MAC_NUM_ADDR 0x0018 #define HBG_REG_MDIO_FREQ_ADDR 0x0024 #define HBG_REG_MAX_MTU_ADDR 0x0028 #define HBG_REG_MIN_MTU_ADDR 0x002C @@ -28,6 +30,7 @@ #define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10) #define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5) #define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0) +#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004) #define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008) #define HBG_REG_MDIO_WDATA_M GENMASK(15, 0) #define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C) @@ -36,6 +39,10 @@ /* GMAC */ #define HBG_REG_SGMII_BASE 0x10000 #define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008) +#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C) +#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C) +#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020) +#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024) #define HBG_REG_DUPLEX_B BIT(0) #define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C) #define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040) @@ -43,20 +50,42 @@ #define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044) #define HBG_REG_PORT_ENABLE_RX_B BIT(1) #define HBG_REG_PORT_ENABLE_TX_B BIT(2) +#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048) +#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0) +#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1) +#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058) #define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060) #define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7) #define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6) #define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) +#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064) +#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0) +#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 
0x01A8) #define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0) #define HBG_REG_CF_CRC_STRIP_B BIT(0) #define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4) #define HBG_REG_MODE_CHANGE_EN_B BIT(0) +#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC) #define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0) #define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3) +#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8) +#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200) +#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204) +#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208) +#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C) #define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210) #define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214) +#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218) +#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C) +#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220) +#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224) +#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228) +#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C) /* PCU */ +#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420) +#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424) +#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428) #define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C) #define HBG_INT_MSK_WE_ERR_B BIT(31) #define HBG_INT_MSK_RBREQ_ERR_B BIT(30) @@ -78,11 +107,17 @@ #define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */ #define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434) #define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438) +#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C) +#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440) #define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444) #define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0) +#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450) +#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454) +#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458) #define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16) +#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470) #define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488) #define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C) #define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490) @@ -101,6 +136,10 @@ #define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0) #define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4) #define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21) +#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4) +#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8) +#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC) +#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688) #define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694) #define HBG_REG_IND_INTR_MASK_B BIT(0) #define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 851490346261..6b6ced37e490 100644 --- 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = { module_platform_driver(g_dsaf_driver); -/** - * hns_dsaf_roce_reset - reset dsaf and roce - * @dsaf_fwnode: Pointer to framework node for the dasf - * @dereset: false - request reset , true - drop reset - * return 0 - success , negative -fail - */ -int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) -{ - struct dsaf_device *dsaf_dev; - struct platform_device *pdev; - u32 mp; - u32 sl; - u32 credit; - int i; - static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { - {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, - {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, - {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, - {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, - {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, - {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, - {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, - {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, - }; - static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, - {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, - {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, - {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, - }; - - /* find the platform device corresponding to fwnode */ - if (is_of_node(dsaf_fwnode)) { - pdev = of_find_device_by_node(to_of_node(dsaf_fwnode)); - } else if (is_acpi_device_node(dsaf_fwnode)) { - pdev = hns_dsaf_find_platform_device(dsaf_fwnode); - } else { - pr_err("fwnode is neither OF or ACPI type\n"); - return -EINVAL; - } - - /* check if we were a success in fetching pdev */ - if (!pdev) { - pr_err("couldn't find platform device for node\n"); - return -ENODEV; - } - - /* retrieve the dsaf_device from the driver data */ - dsaf_dev = dev_get_drvdata(&pdev->dev); - if (!dsaf_dev) { - dev_err(&pdev->dev, "dsaf_dev is NULL\n"); - put_device(&pdev->dev); - return -ENODEV; - } - - /* now, make sure we are running on compatible SoC */ - if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { - dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", - dsaf_dev->ae_dev.name); - put_device(&pdev->dev); - return -ENODEV; - } - - /* do reset or de-reset according to the flag */ - if (!dereset) { - /* reset rocee-channels in dsaf and rocee */ - dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, - false); - dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false); - } else { - /* configure dsaf tx roce correspond to port map and sl map */ - mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG); - for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) - dsaf_set_field(mp, 7 << i * 3, i * 3, - port_map[i][DSAF_ROCE_6PORT_MODE]); - dsaf_set_field(mp, 3 << i * 3, i * 3, 0); - dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp); - - sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG); - for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) - dsaf_set_field(sl, 3 << i * 2, i * 2, - sl_map[i][DSAF_ROCE_6PORT_MODE]); - dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl); - - /* de-reset rocee-channels in dsaf and rocee */ - dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, - true); - 
msleep(SRST_TIME_INTERVAL); - dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true); - - /* enable dsaf channel rocee credit */ - credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG); - dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0); - dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); - - dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); - dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); - } - - put_device(&pdev->dev); - - return 0; -} -EXPORT_SYMBOL(hns_dsaf_roce_reset); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_DESCRIPTION("HNS DSAF driver"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0eb03dff1a8b..653dfbb25d1b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -42,29 +42,6 @@ struct hns_mac_cb; #define HNS_MAX_WAIT_CNT 10000 -enum dsaf_roce_port_mode { - DSAF_ROCE_6PORT_MODE, - DSAF_ROCE_4PORT_MODE, - DSAF_ROCE_2PORT_MODE, - DSAF_ROCE_CHAN_MODE_NUM, -}; - -enum dsaf_roce_port_num { - DSAF_ROCE_PORT_0, - DSAF_ROCE_PORT_1, - DSAF_ROCE_PORT_2, - DSAF_ROCE_PORT_3, - DSAF_ROCE_PORT_4, - DSAF_ROCE_PORT_5, -}; - -enum dsaf_roce_qos_sl { - DSAF_ROCE_SL_0, - DSAF_ROCE_SL_1, - DSAF_ROCE_SL_2, - DSAF_ROCE_SL_3, -}; - #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP) @@ -307,9 +284,6 @@ struct dsaf_misc_op { void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset); - void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk, - bool dereset); - void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset); phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb); int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt); @@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); -int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); - #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 5df19c604d09..91391a49fcea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, HNS_XGE_RESET_FUNC, port, dereset); } -/** - * hns_dsaf_srst_chns - reset dsaf channels - * @dsaf_dev: dsaf device struct pointer - * @msk: xbar channels mask value: - * @dereset: false - request reset , true - drop reset - * - * bit0-5 for xge0-5 - * bit6-11 for ppe0-5 - * bit12-17 for roce0-5 - * bit18-19 for com/dfx - */ -static void -hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) -{ - u32 reg_addr; - - if (!dereset) - reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG; - else - reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG; - - dsaf_write_sub(dsaf_dev, reg_addr, msk); -} - -/** - * hns_dsaf_srst_chns_acpi - reset dsaf channels - * @dsaf_dev: dsaf device struct pointer - * @msk: xbar channels mask value: - * @dereset: false - request reset , true - drop reset - * - * bit0-5 for xge0-5 - * bit6-11 for ppe0-5 - * bit12-17 for roce0-5 - * bit18-19 
for com/dfx - */ -static void -hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) -{ - hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, - HNS_DSAF_CHN_RESET_FUNC, - msk, dereset); -} - -static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) -{ - if (!dereset) { - dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1); - } else { - dsaf_write_sub(dsaf_dev, - DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1); - dsaf_write_sub(dsaf_dev, - DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1); - msleep(20); - dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1); - } -} - -static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset) -{ - hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, - HNS_ROCE_RESET_FUNC, 0, dereset); -} - static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, bool dereset) { @@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) misc_op->ge_srst = hns_dsaf_ge_srst_by_port; misc_op->ppe_srst = hns_ppe_srst_by_port; misc_op->ppe_comm_srst = hns_ppe_com_srst; - misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns; - misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst; misc_op->get_phy_if = hns_mac_get_phy_if; misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; @@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi; misc_op->ppe_srst = hns_ppe_srst_by_port_acpi; misc_op->ppe_comm_srst = hns_ppe_com_srst; - misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi; - misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; misc_op->get_phy_if = hns_mac_get_phy_if_acpi; misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 46af467aa596..635b3a95dd82 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val) dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val); } -void hns_rcb_start(struct hnae_queue *q, u32 val) -{ - hns_rcb_ring_enable_hw(q, val); -} - /** *hns_rcb_common_init_commit_hw - make rcb common init completed *@rcb_common: rcb common device diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 0f4cc184ef39..68f81547dfb4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size); int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index); void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); -void hns_rcb_start(struct hnae_queue *q, u32 val); int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9a63fbc69408..b25fb400f476 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); */ static DEFINE_MUTEX(hnae3_common_lock); +/* ensure the drivers being unloaded one by one */ +static DEFINE_MUTEX(hnae3_unload_lock); + +void hnae3_acquire_unload_lock(void) +{ + 
mutex_lock(&hnae3_unload_lock); +} +EXPORT_SYMBOL(hnae3_acquire_unload_lock); + +void hnae3_release_unload_lock(void) +{ + mutex_unlock(&hnae3_unload_lock); +} +EXPORT_SYMBOL(hnae3_release_unload_lock); + static bool hnae3_client_match(enum hnae3_client_type client_type) { if (client_type == HNAE3_CLIENT_KNIC || diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 710a8f9f2248..4e44f28288f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -916,9 +916,6 @@ struct hnae3_handle { u8 netdev_flags; struct dentry *hnae3_dbgfs; - /* protects concurrent contention between debugfs commands */ - struct mutex dbgfs_lock; - char **dbgfs_buf; /* Network interface message level enabled bits */ u32 msg_enable; @@ -966,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client); void hnae3_set_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, unsigned int inited); +void hnae3_acquire_unload_lock(void); +void hnae3_release_unload_lock(void); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 807eb3bbb11c..9bbece25552b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - struct hns3_dbg_data *dbg_data = filp->private_data; + char *buf = filp->private_data; + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); +} + +static int hns3_dbg_open(struct inode *inode, struct file *filp) +{ + struct hns3_dbg_data *dbg_data = inode->i_private; struct hnae3_handle *handle = dbg_data->handle; struct hns3_nic_priv *priv = handle->priv; - ssize_t size = 0; - char **save_buf; - char *read_buf; u32 index; + char *buf; int ret; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EBUSY; + ret = hns3_dbg_get_cmd_index(dbg_data, &index); if (ret) return ret; - mutex_lock(&handle->dbgfs_lock); - save_buf = &handle->dbgfs_buf[index]; - - if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || - test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { - ret = -EBUSY; - goto out; - } - - if (*save_buf) { - read_buf = *save_buf; - } else { - read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); - if (!read_buf) { - ret = -ENOMEM; - goto out; - } - - /* save the buffer addr until the last read operation */ - *save_buf = read_buf; - - /* get data ready for the first time to read */ - ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, - read_buf, hns3_dbg_cmd[index].buf_len); - if (ret) - goto out; - } + buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; - size = simple_read_from_buffer(buffer, count, ppos, read_buf, - strlen(read_buf)); - if (size > 0) { - mutex_unlock(&handle->dbgfs_lock); - return size; + ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, + buf, hns3_dbg_cmd[index].buf_len); + if (ret) { + kvfree(buf); + return ret; } -out: - /* free the buffer for the last read operation */ - if (*save_buf) { - kvfree(*save_buf); - *save_buf = NULL; - } + filp->private_data = buf; + return 0; +} - mutex_unlock(&handle->dbgfs_lock); - return ret; +static int hns3_dbg_release(struct inode *inode, struct file *filp) +{ + 
kvfree(filp->private_data); + filp->private_data = NULL; + return 0; } static const struct file_operations hns3_dbg_fops = { .owner = THIS_MODULE, - .open = simple_open, + .open = hns3_dbg_open, .read = hns3_dbg_read, + .release = hns3_dbg_release, }; static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) @@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle) int ret; u32 i; - handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, - ARRAY_SIZE(hns3_dbg_cmd), - sizeof(*handle->dbgfs_buf), - GFP_KERNEL); - if (!handle->dbgfs_buf) - return -ENOMEM; - hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = debugfs_create_dir(name, hns3_dbgfs_root); handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; @@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle) debugfs_create_dir(hns3_dbg_dentry[i].name, handle->hnae3_dbgfs); - mutex_init(&handle->dbgfs_lock); - for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES && ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) || @@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle) out: debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; - mutex_destroy(&handle->dbgfs_lock); return ret; } void hns3_dbg_uninit(struct hnae3_handle *handle) { - u32 i; - debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; - - for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) - if (handle->dbgfs_buf[i]) { - kvfree(handle->dbgfs_buf[i]); - handle->dbgfs_buf[i] = NULL; - } - - mutex_destroy(&handle->dbgfs_lock); } void hns3_dbg_register_debugfs(const char *debugfs_dir_name) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 43377a7b2426..9ff797fb36c4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } - netdev->features = features; return 0; } @@ -6003,9 +6002,11 @@ module_init(hns3_init_module); */ static void __exit hns3_exit_module(void) { + hnae3_acquire_unload_lock(); pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); hns3_dbg_unregister_debugfs(); + hnae3_release_unload_lock(); } module_exit(hns3_exit_module); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 05942fa78b11..3f17b3073e50 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -6,6 +6,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, return ret; } +static void hclge_set_reset_pending(struct hclge_dev *hdev, + enum hnae3_reset_type reset_type) +{ + /* When an incorrect reset type is executed, the get_reset_level + * function generates the HNAE3_NONE_RESET flag. As a result, this + * type do not need to pending. 
+ */ + if (reset_type != HNAE3_NONE_RESET) + set_bit(reset_type, &hdev->reset_pending); +} + static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; @@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) */ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); - set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + hclge_set_reset_pending(hdev, HNAE3_IMP_RESET); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); hdev->rst_stats.imp_rst_cnt++; @@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); - set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); + hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); hdev->rst_stats.global_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; @@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", HCLGE_NAME, pci_name(hdev->pdev)); ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, - 0, hdev->misc_vector.name, hdev); + IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev); if (ret) { hclge_free_vector(hdev, 0); dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", @@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev) case HNAE3_FUNC_RESET: dev_info(&pdev->dev, "PF reset requested\n"); /* schedule again to check later */ - set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); + hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET); hclge_reset_task_schedule(hdev); break; default: @@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, clear_bit(HNAE3_FLR_RESET, addr); } + clear_bit(HNAE3_NONE_RESET, addr); + if (hdev->reset_type != HNAE3_NONE_RESET && rst_level < hdev->reset_type) return HNAE3_NONE_RESET; @@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) return false; } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { hdev->rst_stats.reset_fail_cnt++; - set_bit(hdev->reset_type, &hdev->reset_pending); + hclge_set_reset_pending(hdev, hdev->reset_type); dev_info(&hdev->pdev->dev, "re-schedule reset task(%u)\n", hdev->rst_stats.reset_fail_cnt); @@ -4470,8 +4484,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, enum hnae3_reset_type rst_type) { +#define HCLGE_SUPPORT_RESET_TYPE \ + (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \ + BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET)) + struct hclge_dev *hdev = ae_dev->priv; + if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) { + /* To prevent reset triggered by hclge_reset_event */ + set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); + dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n", + rst_type); + return; + } + set_bit(rst_type, &hdev->default_reset_request); } @@ -11881,9 +11907,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_init_rxd_adv_layout(hdev); - /* Enable MISC vector(vector0) */ - hclge_enable_vector(&hdev->misc_vector, true); - ret = hclge_init_wol(hdev); if (ret) dev_warn(&pdev->dev, @@ -11896,6 
+11919,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_state_init(hdev); hdev->last_reset_time = jiffies; + /* Enable MISC vector(vector0) */ + enable_irq(hdev->misc_vector.vector_irq); + hclge_enable_vector(&hdev->misc_vector, true); + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -12301,7 +12328,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); - synchronize_irq(hdev->misc_vector.vector_irq); + disable_irq(hdev->misc_vector.vector_irq); /* Disable all hw interrupts */ hclge_config_mac_tnl_int(hdev, false); @@ -12892,9 +12919,11 @@ static int __init hclge_init(void) static void __exit hclge_exit(void) { + hnae3_acquire_unload_lock(); hnae3_unregister_ae_algo_prepare(&ae_algo); hnae3_unregister_ae_algo(&ae_algo); destroy_workqueue(hclge_wq); + hnae3_release_unload_lock(); } module_init(hclge_init); module_exit(hclge_exit); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 5505caea88e9..bab16c2191b2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb) struct hclge_dev *hdev = vport->back; struct hclge_ptp *ptp = hdev->ptp; + if (!ptp) + return false; + if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) || test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) { ptp->tx_skipped++; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c index 43c1c18fa81f..8c057192aae6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c @@ -510,9 +510,9 @@ out: static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, struct hnae3_knic_private_info *kinfo) { -#define HCLGE_RING_REG_OFFSET 0x200 #define HCLGE_RING_INT_REG_OFFSET 0x4 + struct hnae3_queue *tqp; int i, j, reg_num; int data_num_sum; u32 *reg = data; @@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, reg_num = ARRAY_SIZE(ring_reg_addr_list); for (j = 0; j < kinfo->num_tqps; j++) { reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg); + tqp = kinfo->tqp[j]; for (i = 0; i < reg_num; i++) - *reg++ = hclge_read_dev(&hdev->hw, - ring_reg_addr_list[i] + - HCLGE_RING_REG_OFFSET * j); + *reg++ = readl_relaxed(tqp->io_base - + HCLGE_TQP_REG_OFFSET + + ring_reg_addr_list[i]); } data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2f6ffb88e700..9ba767740a04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1393,6 +1393,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, return ret; } +static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev, + enum hnae3_reset_type reset_type) +{ + /* When an incorrect reset type is executed, the get_reset_level + * function generates the HNAE3_NONE_RESET flag. As a result, this + * type do not need to pending. 
+ */ + if (reset_type != HNAE3_NONE_RESET) + set_bit(reset_type, &hdev->reset_pending); +} + static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { #define HCLGEVF_RESET_WAIT_US 20000 @@ -1542,7 +1553,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) hdev->rst_stats.rst_fail_cnt); if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) - set_bit(hdev->reset_type, &hdev->reset_pending); + hclgevf_set_reset_pending(hdev, hdev->reset_type); if (hclgevf_is_reset_pending(hdev)) { set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); @@ -1662,6 +1673,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr) clear_bit(HNAE3_FLR_RESET, addr); } + clear_bit(HNAE3_NONE_RESET, addr); + return rst_level; } @@ -1671,14 +1684,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev, struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); struct hclgevf_dev *hdev = ae_dev->priv; - dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); - if (hdev->default_reset_request) hdev->reset_level = hclgevf_get_reset_level(&hdev->default_reset_request); else hdev->reset_level = HNAE3_VF_FUNC_RESET; + dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n", + hdev->reset_level); + /* reset of this VF requested */ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); @@ -1689,8 +1703,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev, static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, enum hnae3_reset_type rst_type) { +#define HCLGEVF_SUPPORT_RESET_TYPE \ + (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \ + BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \ + BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET)) + struct hclgevf_dev *hdev = ae_dev->priv; + if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) { + /* To prevent reset triggered by hclge_reset_event */ + set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); + dev_info(&hdev->pdev->dev, "unsupported reset type %d\n", + rst_type); + return; + } set_bit(rst_type, &hdev->default_reset_request); } @@ -1847,14 +1873,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) */ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { /* prepare for full reset of stack + pcie interface */ - set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); + hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - set_bit(hdev->reset_level, &hdev->reset_pending); + hclgevf_set_reset_pending(hdev, hdev->reset_level); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } hclgevf_reset_task_schedule(hdev); @@ -1977,7 +2003,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, "receive reset interrupt 0x%x!\n", rst_ing_reg); - set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); @@ -2287,6 +2313,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); + /* timer needs to be initialized before misc irq */ + timer_setup(&hdev->reset_timer, 
hclgevf_reset_timer, 0); mutex_init(&hdev->mbx_resp.mbx_mutex); sema_init(&hdev->reset_sem, 1); @@ -2986,7 +3014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) HCLGEVF_DRIVER_NAME); hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); - timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); return 0; @@ -3383,8 +3410,10 @@ static int __init hclgevf_init(void) static void __exit hclgevf_exit(void) { + hnae3_acquire_unload_lock(); hnae3_unregister_ae_algo(&ae_algovf); destroy_workqueue(hclgevf_wq); + hnae3_release_unload_lock(); } module_init(hclgevf_init); module_exit(hclgevf_exit); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c index 6db415d8b917..7d9d9dbc7560 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c @@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle) void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, void *data) { -#define HCLGEVF_RING_REG_OFFSET 0x200 #define HCLGEVF_RING_INT_REG_OFFSET 0x4 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_queue *tqp; int i, j, reg_um; u32 *reg = data; @@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, reg_um = ARRAY_SIZE(ring_reg_addr_list); for (j = 0; j < hdev->num_tqps; j++) { reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg); + tqp = &hdev->htqp[j].q; for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, - ring_reg_addr_list[i] + - HCLGEVF_RING_REG_OFFSET * j); + *reg++ = readl_relaxed(tqp->io_base - + HCLGEVF_TQP_REG_OFFSET + + ring_reg_addr_list[i]); } reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 890f213da8d1..ae1f523d6841 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -172,6 +172,7 @@ err_init_txq: hinic_sq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->txqs); + nic_dev->txqs = NULL; return err; } @@ -268,6 +269,7 @@ err_init_rxq: hinic_rq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->rxqs); + nic_dev->rxqs = NULL; return err; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index f81a43d2cdfc..486fb0e20bef 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en) err = HINIC_MGMT_CMD_UNSUPPORTED; } else if (err || !out_size || vlan_filter.status) { dev_err(&pdev->dev, - "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n", + "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", err, vlan_filter.status, out_size); err = -EINVAL; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index e95ae0d39948..0676fc547b6f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2408,6 +2408,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dma_addr_t data_dma_addr; struct netdev_queue *txq; unsigned long lpar_rc; + unsigned int skblen; union sub_crq tx_crq; unsigned int offset; bool use_scrq_send_direct = false; @@ -2522,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 
tx_buff->skb = skb; tx_buff->index = bufidx; tx_buff->pool_index = queue_num; + skblen = skb->len; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -2614,7 +2616,7 @@ early_exit: netif_stop_subqueue(netdev, queue_num); } - tx_bytes += skb->len; + tx_bytes += skblen; txq_trans_cond_update(txq); ret = NETDEV_TX_OK; goto out; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 20bc40eec487..24ec9a4f1ffa 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -292,6 +292,7 @@ config ICE select DIMLIB select LIBIE select NET_DEVLINK + select PACKING select PLDMFW select DPLL help diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 98861cc6df7c..b9dd7b719832 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) } /** - * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF - * @hw: Pointer to hardware structure - * @results: Pointer array to message, results[0] is pointer to message - * @mbx: Pointer to mailbox information structure - * - * This function is a default handler for MAC/VLAN requests from the VF. - * The assumption is that in this case it is acceptable to just directly - * hand off the message from the VF to the underlying shared code. - **/ -s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) -{ - struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - u8 mac[ETH_ALEN]; - u32 *result; - int err = 0; - bool set; - u16 vlan; - u32 vid; - - /* we shouldn't be updating rules on a disabled interface */ - if (!FM10K_VF_FLAG_ENABLED(vf_info)) - err = FM10K_ERR_PARAM; - - if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { - result = results[FM10K_MAC_VLAN_MSG_VLAN]; - - /* record VLAN id requested */ - err = fm10k_tlv_attr_get_u32(result, &vid); - if (err) - return err; - - set = !(vid & FM10K_VLAN_CLEAR); - vid &= ~FM10K_VLAN_CLEAR; - - /* if the length field has been set, this is a multi-bit - * update request. For multi-bit requests, simply disallow - * them when the pf_vid has been set. In this case, the PF - * should have already cleared the VLAN_TABLE, and if we - * allowed them, it could allow a rogue VF to receive traffic - * on a VLAN it was not assigned. In the single-bit case, we - * need to modify requests for VLAN 0 to use the default PF or - * SW vid when assigned. 
- */ - - if (vid >> 16) { - /* prevent multi-bit requests when PF has - * administratively set the VLAN for this VF - */ - if (vf_info->pf_vid) - return FM10K_ERR_PARAM; - } else { - err = fm10k_iov_select_vid(vf_info, (u16)vid); - if (err < 0) - return err; - - vid = err; - } - - /* update VSI info for VF in regards to VLAN table */ - err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); - } - - if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { - result = results[FM10K_MAC_VLAN_MSG_MAC]; - - /* record unicast MAC address requested */ - err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); - if (err) - return err; - - /* block attempts to set MAC for a locked device */ - if (is_valid_ether_addr(vf_info->mac) && - !ether_addr_equal(mac, vf_info->mac)) - return FM10K_ERR_PARAM; - - set = !(vlan & FM10K_VLAN_CLEAR); - vlan &= ~FM10K_VLAN_CLEAR; - - err = fm10k_iov_select_vid(vf_info, vlan); - if (err < 0) - return err; - - vlan = (u16)err; - - /* notify switch of request for new unicast address */ - err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, - mac, vlan, set, 0); - } - - if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { - result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; - - /* record multicast MAC address requested */ - err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); - if (err) - return err; - - /* verify that the VF is allowed to request multicast */ - if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) - return FM10K_ERR_PARAM; - - set = !(vlan & FM10K_VLAN_CLEAR); - vlan &= ~FM10K_VLAN_CLEAR; - - err = fm10k_iov_select_vid(vf_info, vlan); - if (err < 0) - return err; - - vlan = (u16)err; - - /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, - mac, vlan, set); - } - - return err; -} - -/** * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode * @vf_info: VF info structure containing capability flags * @mode: Requested xcast mode diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 8e814df709d2..ad3696893cb1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid); s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, - struct fm10k_mbx_info *); s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d4255c2706fa..c67963bfe14e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -88,6 +88,7 @@ enum i40e_state { __I40E_SERVICE_SCHED, __I40E_ADMINQ_EVENT_PENDING, __I40E_MDD_EVENT_PENDING, + __I40E_MDD_VF_PRINT_PENDING, __I40E_VFLR_EVENT_PENDING, __I40E_RESET_RECOVERY_PENDING, __I40E_TIMEOUT_RECOVERY_PENDING, @@ -191,6 +192,7 @@ enum i40e_pf_flags { */ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, I40E_FLAG_VF_VLAN_PRUNING_ENA, + I40E_FLAG_MDD_AUTO_RESET_VF, I40E_PF_FLAGS_NBITS, /* must be last */ }; @@ -572,7 +574,7 @@ struct i40e_pf { int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; u32 arq_overflows; /* Not fatal, possibly indicative of problems */ - + struct ratelimit_state mdd_message_rate_limit; /* DCBx/DCBNL capability for PF that indicates * whether DCBx is managed 
by firmware or host * based agent (LLDPAD). Also, indicates what @@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); u32 i40e_get_current_fd_count(struct i40e_pf *pf); -u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); @@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); -void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); @@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); int i40e_get_partition_bw_setting(struct i40e_pf *pf); int i40e_set_partition_bw_setting(struct i40e_pf *pf); -int i40e_commit_partition_bw_setting(struct i40e_pf *pf); void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index f73f5930fc58..175c1320c143 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, return status; } -int -i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ u16 buff_size, - struct i40e_asq_cmd_details *cmd_details, - enum i40e_admin_queue_err *aq_status) -{ - return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size, - cmd_details, true, aq_status); -} - /** * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index e8031f1a9b4f..370b4bddee44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1805,37 +1805,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, } /** - * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting - * @hw: pointer to the hw struct - * @seid: vsi number - * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN - * @cmd_details: pointer to command details structure or NULL - **/ -int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_set_vsi_promiscuous_modes *cmd = - (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - u16 flags = 0; - int status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) - flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; - - cmd->promiscuous_flags = cpu_to_le16(flags); - cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); - cmd->seid = cpu_to_le16(seid); - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer 
to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2436,136 +2405,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, } /** - * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule - * @hw: pointer to the hw struct - * @opcode: AQ opcode for add or delete mirror rule - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @id: Destination VSI SEID or Rule ID - * @count: length of the list - * @mr_list: list of mirrored VSI SEIDs or VLAN IDs - * @cmd_details: pointer to command details structure or NULL - * @rule_id: Rule ID returned from FW - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for - * VEBs/VEPA elements only - **/ -static int i40e_mirrorrule_op(struct i40e_hw *hw, - u16 opcode, u16 sw_seid, u16 rule_type, u16 id, - u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_add_delete_mirror_rule *cmd = - (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; - struct i40e_aqc_add_delete_mirror_rule_completion *resp = - (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; - u16 buf_size; - int status; - - buf_size = count * sizeof(*mr_list); - - /* prep the rest of the request */ - i40e_fill_default_direct_cmd_desc(&desc, opcode); - cmd->seid = cpu_to_le16(sw_seid); - cmd->rule_type = cpu_to_le16(rule_type & - I40E_AQC_MIRROR_RULE_TYPE_MASK); - cmd->num_entries = cpu_to_le16(count); - /* Dest VSI for add, rule_id for delete */ - cmd->destination = cpu_to_le16(id); - if (mr_list) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | - I40E_AQ_FLAG_RD)); - if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - } - - status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, - cmd_details); - if (!status || - hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { - if (rule_id) - *rule_id = le16_to_cpu(resp->rule_id); - if (rules_used) - *rules_used = le16_to_cpu(resp->mirror_rules_used); - if (rules_free) - *rules_free = le16_to_cpu(resp->mirror_rules_free); - } - return status; -} - -/** - * i40e_aq_add_mirrorrule - add a mirror rule - * @hw: pointer to the hw struct - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @dest_vsi: SEID of VSI to which packets will be mirrored - * @count: length of the list - * @mr_list: list of mirrored VSI SEIDs or VLAN IDs - * @cmd_details: pointer to command details structure or NULL - * @rule_id: Rule ID returned from FW - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only - **/ -int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, - __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) -{ - if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || - rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { - if (count == 0 || !mr_list) - return -EINVAL; - } - - return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, - rule_type, dest_vsi, count, mr_list, - cmd_details, rule_id, rules_used, rules_free); -} - -/** - * i40e_aq_delete_mirrorrule - delete a mirror rule - * @hw: pointer to the hw struct - * @sw_seid: Switch SEID (to which rule refers) - * @rule_type: Rule Type (ingress/egress/VLAN) - * @count: length of the list - * @rule_id: Rule ID that is returned in the receive desc as part of - * add_mirrorrule. - * @mr_list: list of mirrored VLAN IDs to be removed - * @cmd_details: pointer to command details structure or NULL - * @rules_used: Number of rules used in internal switch - * @rules_free: Number of rules free in internal switch - * - * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only - **/ -int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, - __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free) -{ - /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ - if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - /* count and mr_list shall be valid for rule_type INGRESS VLAN - * mirroring. For other rule_type, count and rule_type should - * not matter. - */ - if (count == 0 || !mr_list) - return -EINVAL; - } - - return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, - rule_type, rule_id, count, mr_list, - cmd_details, NULL, rules_used, rules_free); -} - -/** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: VF id to send msg @@ -3180,41 +3019,6 @@ i40e_aq_update_nvm_exit: } /** - * i40e_aq_rearrange_nvm - * @hw: pointer to the hw struct - * @rearrange_nvm: defines direction of rearrangement - * @cmd_details: pointer to command details structure or NULL - * - * Rearrange NVM structure, available only for transition FW - **/ -int i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aqc_nvm_update *cmd; - struct i40e_aq_desc desc; - int status; - - cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); - - rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | - I40E_AQ_NVM_REARRANGE_TO_STRUCT); - - if (!rearrange_nvm) { - status = -EINVAL; - goto i40e_aq_rearrange_nvm_exit; - } - - cmd->command_flags |= rearrange_nvm; - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - -i40e_aq_rearrange_nvm_exit: - return status; -} - -/** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested @@ -3335,44 +3139,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, } /** - * i40e_aq_restore_lldp - * @hw: pointer to the hw struct - * @setting: pointer to factory setting variable or NULL - * @restore: True if factory settings should be restored - * @cmd_details: pointer to command details structure or NULL - * - * Restore LLDP Agent factory settings if @restore set to True. 
In other case - * only returns factory setting in AQ response. - **/ -int -i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_lldp_restore *cmd = - (struct i40e_aqc_lldp_restore *)&desc.params.raw; - int status; - - if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { - i40e_debug(hw, I40E_DEBUG_ALL, - "Restore LLDP not supported by current FW version.\n"); - return -ENODEV; - } - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); - - if (restore) - cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (setting) - *setting = cmd->command & 1; - - return status; -} - -/** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown @@ -4570,84 +4336,6 @@ phy_write_end: } /** - * i40e_write_phy_register - * @hw: pointer to the HW structure - * @page: registers page number - * @reg: register address in the page - * @phy_addr: PHY address on MDIO interface - * @value: PHY register value - * - * Writes value to specified PHY register - **/ -int i40e_write_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value) -{ - int status; - - switch (hw->device_id) { - case I40E_DEV_ID_1G_BASE_T_X722: - status = i40e_write_phy_register_clause22(hw, reg, phy_addr, - value); - break; - case I40E_DEV_ID_1G_BASE_T_BC: - case I40E_DEV_ID_5G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_10G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - status = i40e_write_phy_register_clause45(hw, page, reg, - phy_addr, value); - break; - default: - status = -EIO; - break; - } - - return status; -} - -/** - * i40e_read_phy_register - * @hw: pointer to the HW structure - * @page: registers page number - * @reg: register address in the page - * @phy_addr: PHY address on MDIO interface - * @value: PHY register value - * - * Reads specified PHY register value - **/ -int i40e_read_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value) -{ - int status; - - switch (hw->device_id) { - case I40E_DEV_ID_1G_BASE_T_X722: - status = i40e_read_phy_register_clause22(hw, reg, phy_addr, - value); - break; - case I40E_DEV_ID_1G_BASE_T_BC: - case I40E_DEV_ID_5G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_10G_BASE_T_BC: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - status = i40e_read_phy_register_clause45(hw, page, reg, - phy_addr, value); - break; - default: - status = -EIO; - break; - } - - return status; -} - -/** * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want @@ -4663,80 +4351,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) } /** - * i40e_blink_phy_link_led - * @hw: pointer to the HW structure - * @time: time how long led will blinks in secs - * @interval: gap between LED on and off in msecs - * - * Blinks PHY link LED - **/ -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval) -{ - u16 led_addr = I40E_PHY_LED_PROV_REG_1; - u16 gpio_led_port; - u8 phy_addr = 0; - int status = 0; - u16 led_ctl; - u8 port_num; - u16 led_reg; - u32 i; - - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, 
port_num); - - for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, - led_addr++) { - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - &led_reg); - if (status) - goto phy_blinking_end; - led_ctl = led_reg; - if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { - led_reg = 0; - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - led_reg); - if (status) - goto phy_blinking_end; - break; - } - } - - if (time > 0 && interval > 0) { - for (i = 0; i < time * 1000; i += interval) { - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, &led_reg); - if (status) - goto restore_config; - if (led_reg & I40E_PHY_LED_MANUAL_ON) - led_reg = 0; - else - led_reg = I40E_PHY_LED_MANUAL_ON; - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_reg); - if (status) - goto restore_config; - msleep(interval); - } - } - -restore_config: - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_ctl); - -phy_blinking_end: - return status; -} - -/** * i40e_led_get_reg - read LED register * @hw: pointer to the HW structure * @led_addr: LED register address @@ -5269,39 +4883,6 @@ i40e_find_segment_in_package(u32 segment_type, (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) /** - * i40e_find_section_in_profile - * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) - * @profile: pointer to the i40e segment header to be searched - * - * This function searches i40e segment for a particular section type. On - * success it returns a pointer to the section header, otherwise it will - * return NULL. - **/ -struct i40e_profile_section_header * -i40e_find_section_in_profile(u32 section_type, - struct i40e_profile_segment *profile) -{ - struct i40e_profile_section_header *sec; - struct i40e_section_table *sec_tbl; - u32 sec_off; - u32 i; - - if (profile->header.type != SEGMENT_TYPE_I40E) - return NULL; - - I40E_SECTION_TABLE(profile, sec_tbl); - - for (i = 0; i < sec_tbl->section_count; i++) { - sec_off = sec_tbl->section_offset[i]; - sec = I40E_SECTION_HEADER(profile, sec_off); - if (sec->section.type == section_type) - return sec; - } - - return NULL; -} - -/** * i40e_ddp_exec_aq_section - Execute generic AQ for DDP * @hw: pointer to the hw struct * @aq: command buffer containing all data to execute AQ @@ -5524,45 +5105,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, } /** - * i40e_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -int -i40e_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - int status = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - - return status; -} - -/** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 8db1eb0c1768..352e957443fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc) } /** - * i40e_dcb_hw_get_num_tc - * @hw: pointer to the hw struct - * - * Returns number of traffic classes configured in HW - **/ -u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw) -{ - u32 reg = rd32(hw, I40E_PRTDCB_GENC); - - return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg); -} - -/** * i40e_dcb_hw_rx_ets_bw_config * @hw: pointer to the hw struct * @bw_share: Bandwidth share indexed per traffic class diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index d76497566e40..d5662c639c41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw, void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, u8 pfc_en, u8 *prio_tc); void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc); -u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw); void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, u8 *mode, u8 *prio_type); void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 208c2f0857b6..6cd9da662ae1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); dev_info(&pf->pdev->dev, " num MDD=%lld\n", - vf->num_mdd_events); + vf->mdd_tx_events.count + vf->mdd_rx_events.count); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index bce5b76f1e7a..8a7a83f83ee5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), 
I40E_PRIV_FLAG("vf-vlan-pruning", I40E_FLAG_VF_VLAN_PRUNING_ENA, 0), + I40E_PRIV_FLAG("mdd-auto-reset-vf", + I40E_FLAG_MDD_AUTO_RESET_VF, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0e1d9e2fbf38..65a702668e21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * @vsi: VSI to remove from * @f: the filter to remove from the list * - * This function should be called instead of i40e_del_filter only if you know - * the exact filter you will remove already, such as via i40e_find_filter or - * i40e_find_mac. + * This function requires you've found * the exact filter you will remove + * already, such as via i40e_find_filter or i40e_find_mac. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. @@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) } /** - * i40e_del_filter - Remove a MAC/VLAN filter from the VSI - * @vsi: the VSI to be searched - * @macaddr: the MAC address - * @vlan: the VLAN - * - * NOTE: This function is expected to be called with mac_filter_hash_lock - * being held. - * ANOTHER NOTE: This function MUST be called from within the context of - * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() - * instead of list_for_each_entry(). - **/ -void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) -{ - struct i40e_mac_filter *f; - - if (!vsi || !macaddr) - return; - - f = i40e_find_filter(vsi, macaddr, vlan); - __i40e_del_filter(vsi, f); -} - -/** * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered @@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, } /** - * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters - * @pf: board private structure - **/ -u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) -{ - u32 val, fcnt_prog; - - val = rd32(&pf->hw, I40E_PFQF_FDSTAT); - fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); - return fcnt_prog; -} - -/** * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ @@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) } /** + * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event + * @pf: board private structure + * @vf: pointer to the VF structure + * @is_tx: true - for Tx event, false - for Rx + */ +static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf, + bool is_tx) +{ + dev_err(&pf->pdev->dev, is_tx ? + "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" : + "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n", + is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count, + pf->hw.pf_id, + vf->vf_id, + vf->default_lan_addr.addr, + str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))); +} + +/** + * i40e_print_vfs_mdd_events - print VFs malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from i40e_handle_mdd_event to rate limit and print VFs MDD events. 
+ */ +static void i40e_print_vfs_mdd_events(struct i40e_pf *pf) +{ + unsigned int i; + + /* check that there are pending MDD events to print */ + if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state)) + return; + + if (!__ratelimit(&pf->mdd_message_rate_limit)) + return; + + for (i = 0; i < pf->num_alloc_vfs; i++) { + struct i40e_vf *vf = &pf->vf[i]; + bool is_printed = false; + + /* only print Rx MDD event message if there are new events */ + if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { + vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count; + i40e_print_vf_mdd_event(pf, vf, false); + is_printed = true; + } + + /* only print Tx MDD event message if there are new events */ + if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { + vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; + i40e_print_vf_mdd_event(pf, vf, true); + is_printed = true; + } + + if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF #%d\n", + i); + } +} + +/** * i40e_handle_mdd_event * @pf: pointer to the PF structure * @@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) { + /* Since the VF MDD event logging is rate limited, check if + * there are pending MDD events. + */ + i40e_print_vfs_mdd_events(pf); return; + } /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); @@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + bool is_mdd_on_tx = false; + bool is_mdd_on_rx = false; + vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_tx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_tx = true; } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { + set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state); wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); - vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", - i); - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); + vf->mdd_rx_events.count++; set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + is_mdd_on_rx = true; + } + + if ((is_mdd_on_tx || is_mdd_on_rx) && + test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { + /* VF MDD event counters will be cleared by + * reset, so print the event prior to reset. 
+ */ + if (is_mdd_on_rx) + i40e_print_vf_mdd_event(pf, vf, false); + if (is_mdd_on_tx) + i40e_print_vf_mdd_event(pf, vf, true); + + i40e_vc_reset_vf(vf, true); } } - /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); + + i40e_print_vfs_mdd_events(pf); } /** @@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf) } /** - * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition - * @pf: board private structure - **/ -int i40e_commit_partition_bw_setting(struct i40e_pf *pf) -{ - /* Commit temporary BW setting to permanent NVM image */ - enum i40e_admin_queue_err last_aq_status; - u16 nvm_word; - int ret; - - if (pf->hw.partition_id != 1) { - dev_info(&pf->pdev->dev, - "Commit BW only works on partition 1! This is partition %d", - pf->hw.partition_id); - ret = -EOPNOTSUPP; - goto bw_commit_out; - } - - /* Acquire NVM for read access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Read word 0x10 of NVM - SW compatibility word 1 */ - ret = i40e_aq_read_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), &nvm_word, - false, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - - /* Wait a bit for NVM release to complete */ - msleep(50); - - /* Acquire NVM for write access */ - ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); - last_aq_status = pf->hw.aq.asq_last_status; - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); - goto bw_commit_out; - } - /* Write it back out unchanged to initiate update NVM, - * which will force a write of the shadow (alt) RAM to - * the NVM - thus storing the bandwidth values permanently. - */ - ret = i40e_aq_update_nvm(&pf->hw, - I40E_SR_NVM_CONTROL_WORD, - 0x10, sizeof(nvm_word), - &nvm_word, true, 0, NULL); - /* Save off last admin queue command status before releasing - * the NVM - */ - last_aq_status = pf->hw.aq.asq_last_status; - i40e_release_nvm(&pf->hw); - if (ret) - dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %pe aq_err %s\n", - ERR_PTR(ret), - i40e_aq_str(&pf->hw, last_aq_status)); -bw_commit_out: - - return ret; -} - -/** * i40e_is_total_port_shutdown_enabled - read NVM and return value * if total port shutdown feature is enabled for this PF * @pf: board private structure @@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* VF MDD event logs are rate limited to one second intervals */ + ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1); + /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 5a0699ca7ce5..099bb8ab7d70 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); int -i40e_asq_send_command_v2(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details, - enum i40e_admin_queue_err *aq_status); -int i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, @@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode); int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val); -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); /* admin send queue commands */ @@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status); -int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free); -int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free); int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, @@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, @@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw, int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details); -int -i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, bool persist, struct i40e_asq_cmd_details *cmd_details); @@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); int i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); -int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 *value); -int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 
phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -int i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, @@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); -struct i40e_profile_section_header * -i40e_find_section_in_profile(u32 section_type, - struct i40e_profile_segment *profile); int i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); int i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); -int -i40e_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); - /* i40e_ddp */ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index dfa785e39458..1120f8e4bb67 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) * @notify_vf: notify vf about reset or not * Reset VF handler. **/ -static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) +void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 66f95e2f3146..5cf74f16f433 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -64,6 +64,12 @@ struct i40evf_channel { u64 max_tx_rate; /* bandwidth rate allocation for VSIs */ }; +struct i40e_mdd_vf_events { + u64 count; /* total count of Rx|Tx events */ + /* count number of the last printed event */ + u64 last_printed; +}; + /* VF information structure */ struct i40e_vf { struct i40e_pf *pf; @@ -92,7 +98,9 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ - u64 num_mdd_events; /* num of mdd events detected */ + /* num of mdd tx and rx events detected */ + struct i40e_mdd_vf_events mdd_rx_events; + struct i40e_mdd_vf_events mdd_tx_events; unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ @@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); +void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf); bool i40e_reset_vf(struct i40e_vf *vf, bool flr); bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 4e885df789ef..e28f1905a4a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, WARN_ON_ONCE(1); } -static int -i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first, - struct xdp_buff *xdp, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); - - if (!xdp_buff_has_frags(first)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(first); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - xsk_buff_free(first); - return -ENOMEM; - } - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, - virt_to_page(xdp->data_hard_start), - XDP_PACKET_HEADROOM, size); - sinfo->xdp_frags_size += size; - xsk_buff_add_frag(xdp); - - return 0; -} - /** * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring * @rx_ring: Rx ring @@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) if (!first) first = bi; - else if (i40e_add_xsk_frag(rx_ring, first, bi, size)) + else if (!xsk_buff_add_frag(first, bi)) { + xsk_buff_free(first); break; + } if (++next_to_process == count) next_to_process = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index a9e54866ae6b..852e5b62f0a5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, f->state = IAVF_VLAN_ADD; adapter->num_vlan_filters++; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); + } else if (f->state == IAVF_VLAN_REMOVE) { + /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. + * We can safely only change the state here. + */ + f->state = IAVF_VLAN_ACTIVE; } clearout: @@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) f = iavf_find_vlan(adapter, vlan); if (f) { - f->state = IAVF_VLAN_REMOVE; - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); + /* IAVF_ADD_VLAN means that VLAN wasn't even added yet. + * Remove it from the list. 
+ */ + if (f->state == IAVF_VLAN_ADD) { + list_del(&f->list); + kfree(f); + adapter->num_vlan_filters--; + } else { + f->state = IAVF_VLAN_REMOVE; + iavf_schedule_aq_request(adapter, + IAVF_FLAG_AQ_DEL_VLAN_FILTER); + } } spin_unlock_bh(&adapter->mac_vlan_list_lock); @@ -1180,7 +1195,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter) q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; - napi_enable(napi); + napi_enable_locked(napi); } } @@ -1196,7 +1211,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; - napi_disable(&q_vector->napi); + napi_disable_locked(&q_vector->napi); } } @@ -1800,8 +1815,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) q_vector->v_idx = q_idx; q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - iavf_napi_poll); + netif_napi_add_locked(adapter->netdev, &q_vector->napi, + iavf_napi_poll); } return 0; @@ -1827,7 +1842,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); + netif_napi_del_locked(&q_vector->napi); } kfree(adapter->q_vectors); adapter->q_vectors = NULL; @@ -1968,6 +1983,7 @@ err: static void iavf_finish_config(struct work_struct *work) { struct iavf_adapter *adapter; + bool netdev_released = false; int pairs, err; adapter = container_of(work, struct iavf_adapter, finish_config); @@ -1976,7 +1992,7 @@ static void iavf_finish_config(struct work_struct *work) * The dev->lock is needed to update the queue number */ rtnl_lock(); - mutex_lock(&adapter->netdev->lock); + netdev_lock(adapter->netdev); mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && @@ -1988,7 +2004,16 @@ static void iavf_finish_config(struct work_struct *work) switch (adapter->state) { case __IAVF_DOWN: + /* Set the real number of queues when reset occurs while + * state == __IAVF_DOWN + */ + pairs = adapter->num_active_queues; + netif_set_real_num_rx_queues(adapter->netdev, pairs); + netif_set_real_num_tx_queues(adapter->netdev, pairs); + if (adapter->netdev->reg_state != NETREG_REGISTERED) { + netdev_unlock(adapter->netdev); + netdev_released = true; err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", @@ -2003,11 +2028,7 @@ static void iavf_finish_config(struct work_struct *work) goto out; } } - - /* Set the real number of queues when reset occurs while - * state == __IAVF_DOWN - */ - fallthrough; + break; case __IAVF_RUNNING: pairs = adapter->num_active_queues; netif_set_real_num_rx_queues(adapter->netdev, pairs); @@ -2020,7 +2041,8 @@ static void iavf_finish_config(struct work_struct *work) out: mutex_unlock(&adapter->crit_lock); - mutex_unlock(&adapter->netdev->lock); + if (!netdev_released) + netdev_unlock(adapter->netdev); rtnl_unlock(); } @@ -2713,12 +2735,16 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, watchdog_task.work); + struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; u32 reg_val; + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { - if (adapter->state == __IAVF_REMOVE) + if (adapter->state == __IAVF_REMOVE) { + netdev_unlock(netdev); return; + } goto 
restart_watchdog; } @@ -2730,30 +2756,35 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; @@ -2765,6 +2796,7 @@ static void iavf_watchdog_task(struct work_struct *work) * as it can loop forever */ mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { @@ -2773,6 +2805,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; @@ -2780,6 +2813,7 @@ static void iavf_watchdog_task(struct work_struct *work) /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: @@ -2792,6 +2826,7 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_change_state(adapter, __IAVF_INIT_FAILED); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & @@ -2811,12 +2846,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; @@ -2847,6 +2884,7 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_REMOVE: default: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return; } @@ -2858,6 +2896,7 @@ static void iavf_watchdog_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; @@ -2865,6 +2904,7 @@ static void iavf_watchdog_task(struct work_struct *work) mutex_unlock(&adapter->crit_lock); restart_watchdog: + netdev_unlock(netdev); if (adapter->state >= __IAVF_DOWN) queue_work(adapter->wq, &adapter->adminq_task); if (adapter->aq_required) @@ -2990,12 +3030,12 @@ static void iavf_reset_task(struct 
work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - mutex_lock(&netdev->lock); + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) queue_work(adapter->wq, &adapter->reset_task); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } @@ -3043,7 +3083,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -3184,7 +3224,7 @@ continue_reset: wake_up(&adapter->reset_waitqueue); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; reset_err: @@ -3195,7 +3235,7 @@ reset_err: iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3667,10 +3707,10 @@ exit: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; - mutex_lock(&netdev->lock); + netdev_lock(netdev); netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return ret; } @@ -4340,14 +4380,17 @@ static int iavf_open(struct net_device *netdev) return -EIO; } + netdev_lock(netdev); while (!mutex_trylock(&adapter->crit_lock)) { /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock * is already taken and iavf_open is called from an upper * device's notifier reacting on NETDEV_REGISTER event. * We have to leave here to avoid dead lock. */ - if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) + if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) { + netdev_unlock(netdev); return -EBUSY; + } usleep_range(500, 1000); } @@ -4396,6 +4439,7 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; @@ -4408,6 +4452,7 @@ err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return err; } @@ -4429,10 +4474,12 @@ static int iavf_close(struct net_device *netdev) u64 aq_to_restore; int status; + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (adapter->state <= __IAVF_DOWN_PENDING) { mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; } @@ -4466,6 +4513,7 @@ static int iavf_close(struct net_device *netdev) iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in @@ -5342,6 +5390,7 @@ static int iavf_suspend(struct device *dev_d) netif_device_detach(netdev); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (netif_running(netdev)) { @@ -5353,6 +5402,7 @@ static int iavf_suspend(struct device *dev_d) iavf_reset_interrupt_capability(adapter); mutex_unlock(&adapter->crit_lock); + netdev_unlock(netdev); return 0; } @@ -5451,6 +5501,7 @@ static void iavf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); iavf_change_state(adapter, __IAVF_REMOVE); @@ -5487,6 +5538,7 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); + netdev_unlock(netdev); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 3307d551f431..9e0d9f710441 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -32,7 +32,8 @@ ice-y := ice_main.o \ ice_parser_rt.o \ ice_idc.o \ devlink/devlink.o \ - devlink/devlink_port.o \ + devlink/health.o \ + devlink/port.o \ ice_sf_eth.o \ ice_sf_vsi_vlan_ops.o \ ice_ddp.o \ diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 415445cefdb2..dbdb83567364 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -6,7 +6,7 @@ #include "ice.h" #include "ice_lib.h" #include "devlink.h" -#include "devlink_port.h" +#include "port.h" #include "ice_eswitch.h" #include "ice_fw_update.h" #include "ice_dcb_lib.h" @@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink, } break; case ICE_VERSION_RUNNING: - err = devlink_info_version_running_put(req, key, ctx->buf); + err = devlink_info_version_running_put_ext(req, key, + ctx->buf, + DEVLINK_INFO_VERSION_TYPE_COMPONENT); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to set running version"); goto out_free_ctx; } break; case ICE_VERSION_STORED: - err = devlink_info_version_stored_put(req, key, ctx->buf); + err = devlink_info_version_stored_put_ext(req, key, + ctx->buf, + DEVLINK_INFO_VERSION_TYPE_COMPONENT); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version"); goto out_free_ctx; @@ -977,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv /* preallocate memory for ice_sched_node */ node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + *priv = node; return 0; @@ -1207,9 +1214,15 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) struct ice_vsi *vsi = ice_get_main_vsi(pf); int err; + err = ice_init_hw(&pf->hw); + if (err) { + dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err); + return err; + } + err = ice_init_dev(pf); if (err) - return err; + goto unroll_hw_init; vsi->flags = ICE_VSI_FLAG_INIT; @@ -1232,6 +1245,8 @@ err_load: rtnl_unlock(); err_vsi_cfg: ice_deinit_dev(pf); +unroll_hw_init: + ice_deinit_hw(&pf->hw); return err; } diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c new file mode 100644 index 000000000000..ea40f7941259 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/health.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 
(c) 2024, Intel Corporation. */ + +#include "ice.h" +#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */ +#include "health.h" + +#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \ + devlink_fmsg_put(fmsg, #name, (obj)->name) + +#define ICE_HEALTH_STATUS_DATA_SIZE 2 + +struct ice_health_status { + enum ice_aqc_health_status code; + const char *description; + const char *solution; + const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE]; +}; + +/* + * In addition to the health status codes provided below, the firmware might + * generate Health Status Codes that are not pertinent to the end-user. + * For instance, Health Code 0x1002 is triggered when the command fails. + * Such codes should be disregarded by the end-user. + * The below lookup requires to be sorted by code. + */ + +static const char *const ice_common_port_solutions = + "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex."; +static const char *const ice_port_number_label = "Port Number"; +static const char *const ice_update_nvm_solution = "Update to the latest NVM image."; + +static const struct ice_health_status ice_health_status_lookup[] = { + {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.", + "Change or replace the module or cable.", {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM, + "Device cannot communicate with the module.", + "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.", + "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.", + "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.", + "Change or replace the module or cable. Change the port option.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.", + NULL, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.", + "Change the port option. Update to the latest NVM image."}, + {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.", + "Change the module or configure the port option to match the current module speed. Change the port option.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT, + "All configured link modes were attempted but failed to establish link. 
The device will restart the process to establish link.", + "Check link partner connection and configuration.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED, + "Port speed is limited by PHY capabilities.", + "Change the module to align to port option.", {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.", + "Change the port option. Update to the latest NVM image."}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD, + "Failed to load the firmware image in the external PHY.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.", + ice_update_nvm_solution, {"Extended Error"}}, + {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.", + "If issue persists, call customer support.", {"Access Type"}}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.", + "Update to latest base driver and DDP package."}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.", + ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}}, + {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB, + "Supplied MIB file is invalid. 
DCB reverted to default configuration.", + "Disable FW-LLDP and check DCBx system configuration.", + {ice_port_number_label, "MIB ID"}}, +}; + +static int ice_health_status_lookup_compare(const void *a, const void *b) +{ + return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code; +} + +static const struct ice_health_status *ice_get_health_status(u16 code) +{ + struct ice_health_status key = { .code = code }; + + return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup), + sizeof(struct ice_health_status), ice_health_status_lookup_compare); +} + +static void ice_describe_status_code(struct devlink_fmsg *fmsg, + struct ice_aqc_health_status_elem *hse) +{ + static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" }; + const struct ice_health_status *health_code; + u32 internal_data[2]; + u16 status_code; + + status_code = le16_to_cpu(hse->health_status_code); + + devlink_fmsg_put(fmsg, "Syndrome", status_code); + if (status_code) { + internal_data[0] = le32_to_cpu(hse->internal_data1); + internal_data[1] = le32_to_cpu(hse->internal_data2); + + health_code = ice_get_health_status(status_code); + if (!health_code) + return; + + devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description); + if (health_code->solution) + devlink_fmsg_string_pair_put(fmsg, "Possible Solution", + health_code->solution); + + for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) { + if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA) + devlink_fmsg_u32_pair_put(fmsg, + health_code->data_label[i] ? + health_code->data_label[i] : + aux_label[i], + internal_data[i]); + } + } +} + +static int +ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack __always_unused *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static int +ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static void ice_config_health_events(struct ice_pf *pf, bool enable) +{ + u8 enable_bits = 0; + int ret; + + if (enable) + enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; + + ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits); + if (ret) + dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n", + str_enable_disable(enable), ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); +} + +/** + * ice_process_health_status_event - Process the health status event from FW + * @pf: pointer to the PF structure + * @event: event structure containing the Health Status Event opcode + * + * 
Decode the Health Status Events and print the associated messages + */ +void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + const struct ice_aqc_health_status_elem *health_info; + u16 count; + + health_info = (struct ice_aqc_health_status_elem *)event->msg_buf; + count = le16_to_cpu(event->desc.params.get_health_status.health_status_count); + + if (count > (event->buf_len / sizeof(*health_info))) { + dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n"); + return; + } + + for (size_t i = 0; i < count; i++) { + const struct ice_health_status *health_code; + u16 status_code; + + status_code = le16_to_cpu(health_info->health_status_code); + health_code = ice_get_health_status(status_code); + + if (health_code) { + switch (le16_to_cpu(health_info->event_source)) { + case ICE_AQC_HEALTH_STATUS_GLOBAL: + pf->health_reporters.fw_status = *health_info; + devlink_health_report(pf->health_reporters.fw, + "FW syndrome reported", NULL); + break; + case ICE_AQC_HEALTH_STATUS_PF: + case ICE_AQC_HEALTH_STATUS_PORT: + pf->health_reporters.port_status = *health_info; + devlink_health_report(pf->health_reporters.port, + "Port syndrome reported", NULL); + break; + default: + dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n"); + } + } else { + u32 data1, data2; + u16 source; + + source = le16_to_cpu(health_info->event_source); + data1 = le32_to_cpu(health_info->internal_data1); + data2 = le32_to_cpu(health_info->internal_data2); + dev_dbg(ice_pf_to_dev(pf), + "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x", + status_code, source, data1, data2); + } + health_info++; + } +} + +/** + * ice_devlink_health_report - boilerplate to call given @reporter + * + * @reporter: devlink health reporter to call, do nothing on NULL + * @msg: message to pass up, "event name" is fine + * @priv_ctx: typically some event struct + */ +static void ice_devlink_health_report(struct devlink_health_reporter *reporter, + const char *msg, void *priv_ctx) +{ + if (!reporter) + return; + + /* We do not do auto recovering, so return value of the below function + * will always be 0, thus we do ignore it. 
+ */ + devlink_health_report(reporter, msg, priv_ctx); +} + +struct ice_mdd_event { + enum ice_mdd_src src; + u16 vf_num; + u16 queue; + u8 pf_num; + u8 event; +}; + +static const char *ice_mdd_src_to_str(enum ice_mdd_src src) +{ + switch (src) { + case ICE_MDD_SRC_TX_PQM: + return "tx_pqm"; + case ICE_MDD_SRC_TX_TCLAN: + return "tx_tclan"; + case ICE_MDD_SRC_TX_TDPU: + return "tx_tdpu"; + case ICE_MDD_SRC_RX: + return "rx"; + default: + return "invalid"; + } +} + +static int +ice_mdd_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_mdd_event *mdd_event = priv_ctx; + const char *src; + + if (!mdd_event) + return 0; + + src = ice_mdd_src_to_str(mdd_event->src); + + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_put(fmsg, "src", src); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +/** + * ice_report_mdd_event - Report an MDD event through devlink health + * @pf: the PF device structure + * @src: the HW block that was the source of this MDD event + * @pf_num: the pf_num on which the MDD event occurred + * @vf_num: the vf_num on which the MDD event occurred + * @event: the event type of the MDD event + * @queue: the queue on which the MDD event occurred + * + * Report an MDD event that has occurred on this PF. + */ +void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num, + u16 vf_num, u8 event, u16 queue) +{ + struct ice_mdd_event ev = { + .src = src, + .pf_num = pf_num, + .vf_num = vf_num, + .event = event, + .queue = queue, + }; + + ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev); +} + +/** + * ice_fmsg_put_ptr - put hex value of pointer into fmsg + * + * @fmsg: devlink fmsg under construction + * @name: name to pass + * @ptr: 64 bit value to print as hex and put into fmsg + */ +static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name, + void *ptr) +{ + char buf[sizeof(ptr) * 3]; + + sprintf(buf, "%p", ptr); + devlink_fmsg_put(fmsg, name, buf); +} + +struct ice_tx_hang_event { + u32 head; + u32 intr; + u16 vsi_num; + u16 queue; + u16 next_to_clean; + u16 next_to_use; + struct ice_tx_ring *tx_ring; +}; + +static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_tx_hang_event *event = priv_ctx; + struct sk_buff *skb; + + if (!event) + return 0; + + skb = event->tx_ring->tx_buf->skb; + devlink_fmsg_obj_nest_start(fmsg); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use); + devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name); + ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc); + ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma); + ice_fmsg_put_ptr(fmsg, "skb-ptr", skb); + devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc, + event->tx_ring->count * sizeof(struct ice_tx_desc)); + devlink_fmsg_dump_skb(fmsg, skb); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +void 
ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring, + u16 vsi_num, u32 head, u32 intr) +{ + struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf; + + buf->tx_ring = tx_ring; + buf->vsi_num = vsi_num; + buf->head = head; + buf->intr = intr; +} + +void ice_report_tx_hang(struct ice_pf *pf) +{ + struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf; + struct ice_tx_ring *tx_ring = buf->tx_ring; + + struct ice_tx_hang_event ev = { + .head = buf->head, + .intr = buf->intr, + .vsi_num = buf->vsi_num, + .queue = tx_ring->q_index, + .next_to_clean = tx_ring->next_to_clean, + .next_to_use = tx_ring->next_to_use, + .tx_ring = tx_ring, + }; + + ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev); +} + +static struct devlink_health_reporter * +ice_init_devlink_rep(struct ice_pf *pf, + const struct devlink_health_reporter_ops *ops) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct devlink_health_reporter *rep; + const u64 graceful_period = 0; + + rep = devl_health_reporter_create(devlink, ops, graceful_period, pf); + if (IS_ERR(rep)) { + struct device *dev = ice_pf_to_dev(pf); + + dev_err(dev, "failed to create devlink %s health report er", + ops->name); + return NULL; + } + return rep; +} + +#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \ + ._field = ice_##_name##_reporter_##_field, + +#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \ + static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \ + .name = #_name, \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \ + } + +#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \ + static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \ + .name = #_name, \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \ + } + +ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump); +ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump); +ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose); +ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose); + +/** + * ice_health_init - allocate and init all ice devlink health reporters and + * accompanied data + * + * @pf: PF struct + */ +void ice_health_init(struct ice_pf *pf) +{ + struct ice_health *reps = &pf->health_reporters; + + reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops); + reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops); + + if (ice_is_fw_health_report_supported(&pf->hw)) { + reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops); + reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops); + ice_config_health_events(pf, true); + } +} + +/** + * ice_deinit_devl_reporter - destroy given devlink health reporter + * @reporter: reporter to destroy + */ +static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter) +{ + if (reporter) + devl_health_reporter_destroy(reporter); +} + +/** + * ice_health_deinit - deallocate all ice devlink health reporters and + * accompanied data + * + * @pf: PF struct + */ +void ice_health_deinit(struct ice_pf *pf) +{ + ice_deinit_devl_reporter(pf->health_reporters.mdd); + ice_deinit_devl_reporter(pf->health_reporters.tx_hang); + if (ice_is_fw_health_report_supported(&pf->hw)) { + ice_deinit_devl_reporter(pf->health_reporters.fw); + ice_deinit_devl_reporter(pf->health_reporters.port); + ice_config_health_events(pf, false); + } +} + +static +void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter) +{ + if (reporter) + 
devlink_health_reporter_state_update(reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); +} + +/** + * ice_health_clear - clear devlink health issues after a reset + * @pf: the PF device structure + * + * Mark the PF in healthy state again after a reset has completed. + */ +void ice_health_clear(struct ice_pf *pf) +{ + ice_health_assign_healthy_state(pf->health_reporters.mdd); + ice_health_assign_healthy_state(pf->health_reporters.tx_hang); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h new file mode 100644 index 000000000000..5edfc4d2adce --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/health.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. */ + +#ifndef _HEALTH_H_ +#define _HEALTH_H_ + +#include <linux/types.h> + +/** + * DOC: health.h + * + * This header file stores everything that is needed for broadly understood + * devlink health mechanism for ice driver. + */ + +struct ice_aqc_health_status_elem; +struct ice_pf; +struct ice_tx_ring; +struct ice_rq_event_info; + +enum ice_mdd_src { + ICE_MDD_SRC_TX_PQM, + ICE_MDD_SRC_TX_TCLAN, + ICE_MDD_SRC_TX_TDPU, + ICE_MDD_SRC_RX, +}; + +/** + * struct ice_health - stores ice devlink health reporters and accompanied data + * @fw: devlink health reporter for FW Health Status events + * @mdd: devlink health reporter for MDD detection event + * @port: devlink health reporter for Port Health Status events + * @tx_hang: devlink health reporter for tx_hang event + * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from + * non-sleeping context + * @tx_ring: ring that the hang occurred on + * @head: descriptor head + * @intr: interrupt register value + * @vsi_num: VSI owning the queue that the hang occurred on + * @fw_status: buffer for last received FW Status event + * @port_status: buffer for last received Port Status event + */ +struct ice_health { + struct devlink_health_reporter *fw; + struct devlink_health_reporter *mdd; + struct devlink_health_reporter *port; + struct devlink_health_reporter *tx_hang; + struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf, + struct ice_tx_ring *tx_ring; + u32 head; + u32 intr; + u16 vsi_num; + ); + struct ice_aqc_health_status_elem fw_status; + struct ice_aqc_health_status_elem port_status; +}; + +void ice_process_health_status_event(struct ice_pf *pf, + struct ice_rq_event_info *event); + +void ice_health_init(struct ice_pf *pf); +void ice_health_deinit(struct ice_pf *pf); +void ice_health_clear(struct ice_pf *pf); + +void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring, + u16 vsi_num, u32 head, u32 intr); +void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num, + u16 vf_num, u8 event, u16 queue); +void ice_report_tx_hang(struct ice_pf *pf); + +#endif /* _HEALTH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c index c6779d9dffff..767419a67fef 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c +++ b/drivers/net/ethernet/intel/ice/devlink/port.c @@ -5,7 +5,7 @@ #include "ice.h" #include "devlink.h" -#include "devlink_port.h" +#include "port.h" #include "ice_lib.h" #include "ice_fltr.h" diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h index d60efc340945..d60efc340945 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h +++ 
b/drivers/net/ethernet/intel/ice/devlink/port.h diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2f5d6f974185..71e05d30f0fd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -78,6 +78,7 @@ #include "ice_irq.h" #include "ice_dpll.h" #include "ice_adapter.h" +#include "devlink/health.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -665,6 +666,7 @@ struct ice_pf { struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; struct device *hwmon_dev; + struct ice_health health_reporters; u8 num_quanta_prof_used; }; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1489a8ceec51..bdee499f991a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -12,6 +12,13 @@ #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TXQ_CTX_SZ 22 + +typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; +typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; + struct ice_aqc_generic { __le32 param0; __le32 param1; @@ -1491,7 +1498,6 @@ struct ice_aqc_dnl_equa_param { #define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) -#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT) #define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT) @@ -1665,6 +1671,7 @@ struct ice_aqc_get_port_options_elem { #define ICE_AQC_PORT_OPT_MAX_LANE_25G 5 #define ICE_AQC_PORT_OPT_MAX_LANE_50G 6 #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 +#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8 u8 global_scid[2]; u8 phy_scid[2]; @@ -1807,6 +1814,7 @@ struct ice_aqc_nvm_pass_comp_tbl { #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 #define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 #define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 +#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3 u8 component_response_code; /* Response only */ #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 #define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 @@ -2084,10 +2092,10 @@ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; - u8 txq_ctx[22]; + ice_txq_ctx_buf_t txq_ctx; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; -}; +} __packed; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. 
Please note that the length of
@@ -2264,6 +2272,8 @@ struct ice_aqc_get_pkg_info_resp {
 	struct ice_aqc_get_pkg_info pkg_info[];
 };
 
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
 /* Get CGU abilities command response data structure (indirect 0x0C61) */
 struct ice_aqc_get_cgu_abilities {
 	u8 num_inputs;
@@ -2508,6 +2518,87 @@ enum ice_aqc_fw_logging_mod {
 	ICE_AQC_FW_LOG_ID_MAX,
 };
 
+enum ice_aqc_health_status_mask {
+	ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+	ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+	ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+	u8 event_source;
+	u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+	ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+	ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+	ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+	ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+	ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+	ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+	ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+	ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+	ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+	ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+	ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+	ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+	ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+	ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+	ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+	ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+	ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+	ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+	ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+	ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+	ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+	ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+	ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+	ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+	ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+	ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+	ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+	ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+	ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+	ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+	ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+	ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+	ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+	ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+	ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+	__le16 health_status_count;
+	u8 reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+enum ice_aqc_health_status_scope {
+	ICE_AQC_HEALTH_STATUS_PF = 0x1,
+	ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+	ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
+};
+
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status. 
+ */ +struct ice_aqc_health_status_elem { + __le16 health_status_code; + __le16 event_source; + __le32 internal_data1; + __le32 internal_data2; +}; + /* Set FW Logging configuration (indirect 0xFF30) * Register for FW Logging (indirect 0xFF31) * Query FW Logging (indirect 0xFF32) @@ -2648,6 +2739,8 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_set_health_status_cfg set_health_status_cfg; + struct ice_aqc_get_health_status get_health_status; struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; @@ -2850,6 +2943,10 @@ enum ice_adminq_opc { /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, + /* System Diagnostic commands */ + ice_aqc_opc_set_health_status_cfg = 0xFF20, + ice_aqc_opc_get_health_status = 0xFF22, + /* FW Logging Commands */ ice_aqc_opc_fw_logs_config = 0xFF30, ice_aqc_opc_fw_logs_register = 0xFF31, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 82a9cd4ec7ae..b2af8e3586f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* Enable descriptor prefetch */ + rlan_ctx.prefena = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi * metadata and flags to allow redirecting to PR netdev */ @@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 496d86cbd13f..7a2a2e8da8fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,6 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_ptp_hw.h" +#include <linux/packing.h> #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_MAX_NETLIST_SIZE 10 @@ -308,6 +309,42 @@ bool ice_is_e825c(struct ice_hw *hw) } /** + * ice_is_pf_c827 - check if pf contains c827 phy + * @hw: pointer to the hw struct + * + * Return: true if the device has c827 phy. 
+ */ +static bool ice_is_pf_c827(struct ice_hw *hw) +{ + struct ice_aqc_get_link_topo cmd = {}; + u8 node_part_number; + u16 node_handle; + int status; + + if (hw->mac_type != ICE_MAC_E810) + return false; + + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) + return true; + + cmd.addr.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); + cmd.addr.topo_params.index = 0; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + + if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) + return false; + + if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) + return true; + + return false; +} + +/** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * @@ -1025,6 +1062,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } /** + * ice_wait_for_fw - wait for full FW readiness + * @hw: pointer to the hardware structure + * @timeout: milliseconds that can elapse before timing out + * + * Return: 0 on success, -ETIMEDOUT on timeout. + */ +static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) +{ + int fw_loading; + u32 elapsed = 0; + + while (elapsed <= timeout) { + fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; + + /* firmware was not yet loaded, we have to wait more */ + if (fw_loading) { + elapsed += 100; + msleep(100); + continue; + } + return 0; + } + + return -ETIMEDOUT; +} + +/** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ @@ -1173,8 +1237,19 @@ int ice_init_hw(struct ice_hw *hw) mutex_init(&hw->tnl_lock); ice_init_chk_recipe_reuse_support(hw); - return 0; + /* Some cards require longer initialization times + * due to necessity of loading FW from an external source. + * This can take even half a minute. 
+	 */
+	if (ice_is_pf_c827(hw)) {
+		status = ice_wait_for_fw(hw, 30000);
+		if (status) {
+			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out");
+			goto err_unroll_fltr_mgmt_struct;
+		}
+	}
+	return 0;
 err_unroll_fltr_mgmt_struct:
 	ice_cleanup_fltr_mgmt_struct(hw);
 err_unroll_sched:
@@ -1360,39 +1435,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
 }
 
 /**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
  * @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
  * @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
  */
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+				   const ice_rxq_ctx_buf_t *rxq_ctx,
+				   u32 rxq_index)
 {
-	u8 i;
-
-	if (!ice_rxq_ctx)
-		return -EINVAL;
-
-	if (rxq_index > QRX_CTRL_MAX_INDEX)
-		return -EINVAL;
-
 	/* Copy each dword separately to HW */
-	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
-		wr32(hw, QRX_CONTEXT(i, rxq_index),
-		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+		u32 ctx = ((const u32 *)rxq_ctx)[i];
 
-		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
-			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
-	}
+		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
 
-	return 0;
+		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+	}
 }
 
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
 /* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
-	/* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+	/* Field Width LSB */
 	ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
 	ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
 	ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1413,35 +1480,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
 	ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
-	{ 0 }
 };
 
 /**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+			     ice_rxq_ctx_buf_t *buf)
+{
+	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
  * @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
  * @rxq_index: the index of the Rx queue
 *
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
 int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		      u32 rxq_index)
 {
-	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+	ice_rxq_ctx_buf_t buf = {};
 
-	if (!rlan_ctx)
+	if (rxq_index > QRX_CTRL_MAX_INDEX)
 		return -EINVAL;
 
-	rlan_ctx->prefena = 1;
+	ice_pack_rxq_ctx(rlan_ctx, &buf);
+	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
 
-	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
-	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+	return 0;
 }
 
 /* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
 	/* Field Width LSB */
 	ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
 	ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1470,10 +1552,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
-	ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
-	{ 0 }
 };
 
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
 /* Sideband Queue command wrappers */
 
 /**
@@ -2547,6 +2641,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
 	info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
 	info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+	info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
 
 	info->ena_ports = logical_id;
 	info->tmr_own_map = phys_id;
@@ -2569,6 +2664,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
 		  info->ts_ll_read);
 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
 		  info->ts_ll_int_read);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+		  info->ll_phy_tmr_update);
 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
 		  info->ena_ports);
 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2709,40 +2806,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
 }
 
 /**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
-	struct ice_aqc_get_link_topo cmd = {};
-	u8 node_part_number;
-	u16 node_handle;
-	int status;
-
-	if (hw->mac_type != ICE_MAC_E810)
-		return false;
-
-	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
-		return true;
-
-	cmd.addr.topo_params.node_type_ctx =
-		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
-		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
-	cmd.addr.topo_params.index = 0;
-
-	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
-					 &node_handle);
-
-	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
-		return false;
-
-	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
-		return true;
-
-	return false;
-}
-
-/**
  * ice_is_phy_rclk_in_netlist
  * @hw: pointer to the hw struct
  *
@@ -4096,6 +4159,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
 }
 
 /**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw 
struct + * + * Return: PHY lane number on success, negative error code otherwise. + */ +int ice_get_phy_lane_number(struct ice_hw *hw) +{ + struct ice_aqc_get_port_options_elem *options; + unsigned int lport = 0; + unsigned int lane; + int err; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL); + if (!options) + return -ENOMEM; + + for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) { + u8 options_count = ICE_AQC_PORT_OPT_MAX; + u8 speed, active_idx, pending_idx; + bool active_valid, pending_valid; + + err = ice_aq_get_port_options(hw, options, &options_count, lane, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (err) + goto err; + + if (!active_valid) + continue; + + speed = options[active_idx].max_lane_speed; + /* If we don't get speed for this lane, it's unoccupied */ + if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G) + continue; + + if (hw->pf_id == lport) { + kfree(options); + return lane; + } + + lport++; + } + + /* PHY lane not found */ + err = -ENXIO; +err: + kfree(options); + return err; +} + +/** * ice_aq_sff_eeprom * @hw: pointer to the HW struct * @lport: bits [7:0] = logical port, bit [8] = logical port valid @@ -4558,205 +4672,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ /** - * ice_pack_ctx_byte - write a byte to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u8 src_byte, dest_byte, mask; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - src_byte = *from; - src_byte <<= shift_width; - src_byte &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_byte, dest, sizeof(dest_byte)); - - dest_byte &= ~mask; /* get the bits not changing */ - dest_byte |= src_byte; /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); -} - -/** - * ice_pack_ctx_word - write a word to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u16 src_word, mask; - __le16 dest_word; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_word = *(u16 *)from; - src_word <<= shift_width; - src_word &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_word, dest, sizeof(dest_word)); - - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); -} - -/** - * ice_pack_ctx_dword - write a dword to a packed 
context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u32 src_dword, mask; - __le32 dest_dword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_dword = *(u32 *)from; - src_dword <<= shift_width; - src_dword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_dword, dest, sizeof(dest_dword)); - - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); -} - -/** - * ice_pack_ctx_qword - write a qword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u64 src_qword, mask; - __le64 dest_qword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_qword = *(u64 *)from; - src_qword <<= shift_width; - src_qword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_qword, dest, sizeof(dest_qword)); - - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); -} - -/** - * ice_set_ctx - set context bits in packed structure - * @hw: pointer to the hardware structure - * @src_ctx: pointer to a generic non-packed context structure - * @dest_ctx: pointer to memory for the packed structure - * @ce_info: List of Rx context elements - */ -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - int f; - - for (f = 0; ce_info[f].width; f++) { - /* We have to deal with each element of the FW response - * using the correct size so that we are correct regardless - * of the endianness of the machine. - */ - if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", - f, ce_info[f].width, ce_info[f].size_of); - continue; - } - switch (ce_info[f].size_of) { - case sizeof(u8): - ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u16): - ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u32): - ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u64): - ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); - break; - default: - return -EINVAL; - } - } - - return 0; -} - -/** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle @@ -6032,6 +5947,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) } /** + * ice_is_fw_health_report_supported - checks if firmware supports health events + * @hw: pointer to the hardware structure + * + * Return: true if firmware supports health status reports, + * false otherwise + */ +bool ice_is_fw_health_report_supported(struct ice_hw *hw) +{ + return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ, + ICE_FW_API_HEALTH_REPORT_MIN, + ICE_FW_API_HEALTH_REPORT_PATCH); +} + +/** + * ice_aq_set_health_status_cfg - Configure FW health events + * @hw: pointer to the HW struct + * @event_source: type of diagnostic events to enable + * + * Configure the health status event types that the firmware will send to this + * PF. The supported event types are: PF-specific, all PFs, and global. + * + * Return: 0 on success, negative error code otherwise. + */ +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source) +{ + struct ice_aqc_set_health_status_cfg *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_health_status_cfg; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg); + + cmd->event_source = event_source; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct * @mib_type: Local, Remote or both Local and Remote MIBs diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 27208a60cece..15ba38543738 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); -extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); + +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf); extern struct mutex ice_global_cfg_lock_sw; @@ -113,7 +112,6 @@ int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -bool ice_is_pf_c827(struct ice_hw *hw); bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw); bool ice_is_clock_mux_in_netlist(struct ice_hw *hw); bool ice_is_cgu_in_netlist(struct ice_hw *hw); @@ -143,6 +141,8 @@ int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); +bool ice_is_fw_health_report_supported(struct ice_hw *hw); +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source); int 
ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, u8 serdes_num, int *output); int @@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw, int ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, u8 new_option); +int ice_get_phy_lane_number(struct ice_hw *hw); int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index d5ad6d84007c..8d806d8ad761 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf) struct kthread_worker *kworker; kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); - kworker = kthread_create_worker(0, "ice-dplls-%s", + kworker = kthread_run_worker(0, "ice-dplls-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); @@ -2065,6 +2065,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf) } /** + * ice_dpll_phase_range_set - initialize phase adjust range helper + * @range: pointer to phase adjust range struct to be initialized + * @phase_adj: a value to be used as min(-)/max(+) boundary + */ +static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range, + u32 phase_adj) +{ + range->min = -phase_adj; + range->max = phase_adj; +} + +/** * ice_dpll_init_info_pins_generic - initializes generic pins info * @pf: board private structure * @input: if input pins initialized @@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input) for (i = 0; i < pin_num; i++) { pins[i].idx = i; pins[i].prop.board_label = labels[i]; - pins[i].prop.phase_range.min = phase_adj_max; - pins[i].prop.phase_range.max = -phase_adj_max; + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); pins[i].prop.capabilities = cap; pins[i].pf = pf; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); @@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, struct ice_hw *hw = &pf->hw; struct ice_dpll_pin *pins; unsigned long caps; + u32 phase_adj_max; u8 freq_supp_num; bool input; @@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, case ICE_DPLL_PIN_TYPE_INPUT: pins = pf->dplls.inputs; num_pins = pf->dplls.num_inputs; + phase_adj_max = pf->dplls.input_phase_adj_max; input = true; break; case ICE_DPLL_PIN_TYPE_OUTPUT: pins = pf->dplls.outputs; num_pins = pf->dplls.num_outputs; + phase_adj_max = pf->dplls.output_phase_adj_max; input = false; break; default: @@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, return ret; caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); - pins[i].prop.phase_range.min = - pf->dplls.input_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.input_phase_adj_max; } else { - pins[i].prop.phase_range.min = - pf->dplls.output_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.output_phase_adj_max; ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); if (ret) return ret; } + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); pins[i].prop.capabilities = caps; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); if (ret) @@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu) dp->dpll_idx = abilities.pps_dpll_idx; d->num_inputs = abilities.num_inputs; d->num_outputs = 
abilities.num_outputs; - d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj); - d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj); + d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; + d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; alloc_size = sizeof(*d->inputs) * d->num_inputs; d->inputs = kzalloc(alloc_size, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index ac7db100e2cd..5c7dcf21b222 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -5,7 +5,7 @@ #define _ICE_ESWITCH_H_ #include <net/devlink.h> -#include "devlink/devlink_port.h" +#include "devlink/port.h" #ifdef CONFIG_ICE_SWITCHDEV void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 3072634bf049..f241493a6ac8 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num, { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 }, { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf }, { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf }, - { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate }, { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf }, { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf }, { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc }, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index 8f2ad1c172c0..23b2cfbc9684 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool { int rx_equ_post1; int rx_equ_bflf; int rx_equ_bfhf; - int rx_equ_drate; int rx_equ_ctle_gainhf; int rx_equ_ctle_gainlf; int rx_equ_ctle_gaindc; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 2702a0da5c3e..70c201f569ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -6,6 +6,7 @@ #include <linux/crc32.h> #include <linux/pldmfw.h> #include "ice.h" +#include "ice_lib.h" #include "ice_fw_update.h" struct ice_fwu_priv { @@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code, case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: dev_info(dev, "firmware has rejected updating %s\n", component); break; + case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK: + if (ice_is_recovery_mode(&pf->hw)) + return 0; + break; } switch (code) { @@ -1004,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink, return -EOPNOTSUPP; } - if (!hw->dev_caps.common_cap.nvm_unified_update) { + if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) { NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); return -EOPNOTSUPP; } memset(&priv, 0, sizeof(priv)); + if (params->component && strcmp(params->component, "fw.mgmt") == 0) { + priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT; + priv.context.component_identifier = NVM_COMP_ID_NVM; + } else if (params->component) { + return -EOPNOTSUPP; + } + /* the E822 device needs a slightly different ops */ if (hw->mac_type == ICE_MAC_GENERIC) 
priv.context.ops = &ice_fwu_ops_e822; diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index f02e8ca55375..b2148dbe49b2 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); - kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 611577ebc29d..1479b45738af 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits { ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ }; -#define ICE_RXQ_CTX_SIZE_DWORDS 8 -#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) -/* RLAN Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* RLAN Rx queue context data */ struct ice_rlan_ctx { u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 - u16 dbuf; /* bigger than needed, see above for reason */ + u8 dbuf; #define ICE_RLAN_CTX_HBUF_S 6 - u16 hbuf; /* bigger than needed, see above for reason */ + u8 hbuf; u8 dtype; u8 dsize; u8 crcstrip; @@ -401,29 +393,15 @@ struct ice_rlan_ctx { u8 hsplit_0; u8 hsplit_1; u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ + u16 rxmax; u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 lrxqthresh; u8 prefena; /* NOTE: normally must be set to 1 at init */ }; -struct ice_ctx_ele { - u16 offset; - u16 size_of; - u16 width; - u16 lsb; -}; - -#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ - .offset = offsetof(struct _struct, _ele), \ - .size_of = sizeof_field(struct _struct, _ele), \ - .width = _width, \ - .lsb = _lsb, \ -} - /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, @@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 -/* Tx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. 
- */ +/* Tx queue context data */ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; - u16 cgd_num; /* bigger than needed, see above for reason */ + u8 cgd_num; u8 pf_num; u16 vmvf_num; u8 vmvf_type; @@ -573,7 +545,7 @@ struct ice_tlan_ctx { u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; u8 wb_mode; u8 tphrd_desc; u8 tphrd; @@ -582,7 +554,7 @@ struct ice_tlan_ctx { u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; - u32 qlen; /* bigger than needed, see above for reason */ + u16 qlen; u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; @@ -590,7 +562,6 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */ }; #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a7d45a8ce7ac..38a1c8372180 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1700,6 +1700,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf) return true; } +#define ICE_FW_MODE_REC_M BIT(1) +bool ice_is_recovery_mode(struct ice_hw *hw) +{ + return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M; +} + /** * ice_update_eth_stats - Update VSI-specific ethernet statistics counters * @vsi: the VSI to be updated diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 10d6fc479a32..eabb35834a24 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -90,6 +90,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_rdma_ena(struct ice_pf *pf); +bool ice_is_recovery_mode(struct ice_hw *hw); bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi); bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0ab35607e5d5..c3a0fb97c5ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -14,7 +14,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "devlink/devlink.h" -#include "devlink/devlink_port.h" +#include "devlink/port.h" #include "ice_sf_eth.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the @@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return 0; - ice_ptp_link_change(pf, pf->hw.pf_id, link_up); + ice_ptp_link_change(pf, link_up); if (ice_is_dcb_active(pf)) { if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) @@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); break; + case ice_aqc_opc_get_health_status: + ice_process_health_status_event(pf, &event); + break; default: dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); @@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num, + event, queue); wr32(hw, 
GL_MDET_TX_PQM, 0xffffffff); } @@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num, + event, queue); wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); } @@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event, + queue); wr32(hw, GL_MDET_RX, 0xffffffff); } @@ -2355,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf) } } +static void ice_service_task_recovery_mode(struct work_struct *work) +{ + struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); + + set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); + ice_clean_adminq_subtask(pf); + + ice_service_task_complete(pf); + + mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); +} + /** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct @@ -2364,9 +2385,11 @@ static void ice_service_task(struct work_struct *work) struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); unsigned long start_time = jiffies; - /* subtasks */ + if (pf->health_reporters.tx_hang_buf.tx_ring) { + ice_report_tx_hang(pf); + pf->health_reporters.tx_hang_buf.tx_ring = NULL; + } - /* process reset requests first */ ice_reset_subtask(pf); /* bail if a reset/recovery cycle is pending or rebuild failed */ @@ -4741,55 +4764,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -/** - * ice_wait_for_fw - wait for full FW readiness - * @hw: pointer to the hardware structure - * @timeout: milliseconds that can elapse before timing out - */ -static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) -{ - int fw_loading; - u32 elapsed = 0; - - while (elapsed <= timeout) { - fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; - - /* firmware was not yet loaded, we have to wait more */ - if (fw_loading) { - elapsed += 100; - msleep(100); - continue; - } - return 0; - } - - return -ETIMEDOUT; -} - int ice_init_dev(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int err; - err = ice_init_hw(hw); - if (err) { - dev_err(dev, "ice_init_hw failed: %d\n", err); - return err; - } - - /* Some cards require longer initialization times - * due to necessity of loading FW from an external source. - * This can take even half a minute. 
- */ - if (ice_is_pf_c827(hw)) { - err = ice_wait_for_fw(hw, 30000); - if (err) { - dev_err(dev, "ice_wait_for_fw timed out"); - return err; - } - } - ice_init_feature_support(pf); err = ice_init_ddp_config(hw, pf); @@ -4810,7 +4790,7 @@ int ice_init_dev(struct ice_pf *pf) err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); - goto err_init_pf; + return err; } pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; @@ -4834,7 +4814,7 @@ int ice_init_dev(struct ice_pf *pf) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_scheme; + goto unroll_pf_init; } /* In case of MSIX we are going to setup the misc vector right here @@ -4845,17 +4825,15 @@ int ice_init_dev(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_req_irq_msix_misc; + goto unroll_irq_scheme_init; } return 0; -err_req_irq_msix_misc: +unroll_irq_scheme_init: ice_clear_interrupt_scheme(pf); -err_init_interrupt_scheme: +unroll_pf_init: ice_deinit_pf(pf); -err_init_pf: - ice_deinit_hw(hw); return err; } @@ -5087,6 +5065,7 @@ static int ice_init_devlink(struct ice_pf *pf) return err; ice_devlink_init_regions(pf); + ice_health_init(pf); ice_devlink_register(pf); return 0; @@ -5095,6 +5074,7 @@ static int ice_init_devlink(struct ice_pf *pf) static void ice_deinit_devlink(struct ice_pf *pf) { ice_devlink_unregister(pf); + ice_health_deinit(pf); ice_devlink_destroy_regions(pf); ice_devlink_unregister_params(pf); } @@ -5249,6 +5229,36 @@ void ice_unload(struct ice_pf *pf) ice_decfg_netdev(vsi); } +static int ice_probe_recovery_mode(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + dev_err(dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n"); + + INIT_HLIST_HEAD(&pf->aq_wait_list); + spin_lock_init(&pf->aq_wait_lock); + init_waitqueue_head(&pf->aq_wait_queue); + + timer_setup(&pf->serv_tmr, ice_service_timer, 0); + pf->serv_tmr_period = HZ; + INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); + clear_bit(ICE_SERVICE_SCHED, pf->state); + err = ice_create_all_ctrlq(&pf->hw); + if (err) + return err; + + scoped_guard(devl, priv_to_devlink(pf)) { + err = ice_init_devlink(pf); + if (err) + return err; + } + + ice_service_task_restart(pf); + + return 0; +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -5312,13 +5322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } pci_set_master(pdev); - - adapter = ice_adapter_get(pdev); - if (IS_ERR(adapter)) - return PTR_ERR(adapter); - pf->pdev = pdev; - pf->adapter = adapter; pci_set_drvdata(pdev, pf); set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ @@ -5346,29 +5350,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw->debug_mask = debug; #endif + if (ice_is_recovery_mode(hw)) + return ice_probe_recovery_mode(pf); + + err = ice_init_hw(hw); + if (err) { + dev_err(dev, "ice_init_hw failed: %d\n", err); + return err; + } + + adapter = ice_adapter_get(pdev); + if (IS_ERR(adapter)) { + err = PTR_ERR(adapter); + goto unroll_hw_init; + } + pf->adapter = adapter; + err = ice_init(pf); if (err) - goto err_init; + goto unroll_adapter; devl_lock(priv_to_devlink(pf)); err = ice_load(pf); if (err) - goto err_load; + goto unroll_init; err = ice_init_devlink(pf); if (err) - goto err_init_devlink; + goto unroll_load; devl_unlock(priv_to_devlink(pf)); return 0; -err_init_devlink: +unroll_load: ice_unload(pf); -err_load: +unroll_init: devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); -err_init: +unroll_adapter: ice_adapter_put(pdev); +unroll_hw_init: + ice_deinit_hw(hw); return err; } @@ -5448,6 +5470,14 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + if (ice_is_recovery_mode(&pf->hw)) { + ice_service_task_stop(pf); + scoped_guard(devl, priv_to_devlink(pf)) { + ice_deinit_devlink(pf); + } + return; + } + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); @@ -6790,7 +6820,7 @@ static int ice_up_complete(struct ice_vsi *vsi) ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); - ice_ptp_link_change(pf, pf->hw.pf_id, true); + ice_ptp_link_change(pf, true); } /* Perform an initial read of the statistics registers now to @@ -7260,7 +7290,7 @@ int ice_down(struct ice_vsi *vsi) if (vsi->netdev) { vlan_err = ice_vsi_del_vlan_zero(vsi); - ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); + ice_ptp_link_change(vsi->back, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } @@ -7793,6 +7823,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); + ice_health_clear(pf); + ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); @@ -8283,16 +8315,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) if (tx_ring) { struct ice_hw *hw = &pf->hw; - u32 head, val = 0; + u32 head, intr = 0; head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, rd32(hw, 
QTX_COMM_HEAD(vsi->txq_map[txqueue]))); /* Read interrupt register */ - val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, txqueue, tx_ring->next_to_clean, - head, tx_ring->next_to_use, val); + head, tx_ring->next_to_use, intr); + + ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); } pf->tx_timeout_last_recovery = jiffies; diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h index 6509d807627c..4f56d53d56b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_parser.h +++ b/drivers/net/ethernet/intel/ice/ice_parser.h @@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ #define ICE_BST_TCAM_TABLE_SIZE 256 #define ICE_BST_TCAM_KEY_SIZE 20 -#define ICE_BST_KEY_TCAM_SIZE 19 /* Boost TCAM item */ struct ice_bst_tcam_item { @@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); #define ICE_PARSER_GPR_NUM 128 #define ICE_PARSER_FLG_NUM 64 #define ICE_PARSER_ERR_NUM 16 -#define ICE_BST_KEY_SIZE 10 #define ICE_MARKER_ID_SIZE 9 #define ICE_MARKER_MAX_SIZE \ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) @@ -431,13 +429,13 @@ struct ice_parser_rt { u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; u16 pkt_len; u16 po; - u8 bst_key[ICE_BST_KEY_SIZE]; + u8 bst_key[ICE_BST_TCAM_KEY_SIZE]; struct ice_pg_cam_key pg_key; + u8 pg_prio; struct ice_alu *alu0; struct ice_alu *alu1; struct ice_alu *alu2; struct ice_pg_cam_action *action; - u8 pg_prio; struct ice_gpr_pu pu; u8 markers[ICE_MARKER_ID_SIZE]; bool protocols[ICE_PO_PAIR_SIZE]; diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c index dedf5e854e4b..3995d662e050 100644 --- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c +++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c @@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt, else key[idd] = imem->b_kb.prio; - idd = ICE_BST_KEY_TCAM_SIZE - 1; + idd = ICE_BST_TCAM_KEY_SIZE - 2; for (i = idd; i >= 0; i--) { int j; j = ho + idd - i; if (j < ICE_PARSER_MAX_PKT_LEN) - key[i] = rt->pkt_buf[ho + idd - i]; + key[i] = rt->pkt_buf[j]; else key[i] = 0; } - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - key[0], key[1], key[2], key[3], key[4], - key[5], key[6], key[7], key[8], key[9]); - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); + ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER, + KBUILD_MODNAME ": Generated Boost TCAM Key", + key, ICE_BST_TCAM_KEY_SIZE); } static u16 ice_bit_rev_u16(u16 v, int len) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index a999fface272..e26320ce52ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { - /* name, gpio */ - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { TIME_SYNC, { 4, -1 }, { 0, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 11 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 
1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 15, 14 }}, + { SDP1, { 1, 1 }, { 15, 14 }}, + { SDP2, { 2, 2 }, { 15, 14 }}, + { SDP3, { 3, 3 }, { 15, 14 }}, + { TIME_SYNC, { 4, -1 }, { 11, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 9 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 0, 1 }}, + { SDP1, { 1, 1 }, { 0, 1 }}, + { SDP2, { 2, 2 }, { 0, 1 }}, + { SDP3, { 3, 3 }, { 0, 1 }}, + { ONE_PPS, { -1, 5 }, { 0, 1 }}, }; static const char ice_pin_names_nvm[][64] = { @@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { - /* name, gpio */ - { GNSS, { 1, -1 }}, - { SMA1, { 1, 0 }}, - { UFL1, { -1, 0 }}, - { SMA2, { 3, 2 }}, - { UFL2, { 3, -1 }}, + /* name, gpio, delay */ + { GNSS, { 1, -1 }, { 0, 0 }}, + { SMA1, { 1, 0 }, { 0, 1 }}, + { UFL1, { -1, 0 }, { 0, 1 }}, + { SMA2, { 3, 2 }, { 0, 1 }}, + { UFL2, { 3, -1 }, { 0, 0 }}, }; static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) @@ -464,7 +464,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) */ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) { + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; + unsigned long flags; struct sk_buff *skb; struct ice_pf *pf; @@ -473,6 +475,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + params = &pf->hw.ptp.phy.e810; /* Drop packets which have waited for more than 2 seconds */ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { @@ -489,11 +492,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS; + /* Write TS index to read to the PF register so the FW can read it */ - wr32(&pf->hw, PF_SB_ATQBAL, - TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) | - TS_LL_READ_TS); + wr32(&pf->hw, REG_LL_PROXY_H, + REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | + REG_LL_PROXY_H_EXEC); tx->last_ll_ts_idx_read = idx; + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); } /** @@ -504,35 +513,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { struct skb_shared_hwtstamps shhwtstamps = {}; u8 idx = tx->last_ll_ts_idx_read; + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; u64 raw_tstamp, tstamp; bool drop_ts = false; struct sk_buff *skb; + unsigned long flags; + struct device *dev; struct ice_pf *pf; - u32 val; + u32 reg_ll_high; if (!tx->init || tx->last_ll_ts_idx_read < 0) return; ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + dev = ice_pf_to_dev(pf); + params = &pf->hw.ptp.phy.e810; ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); - val = rd32(&pf->hw, PF_SB_ATQBAL); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS)) + dev_dbg(dev, "%s: low latency interrupt request not in progress?\n", + __func__); + + /* Read the low 32 bit value */ + raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L); + /* Read the status together with high TS part */ + reg_ll_high = rd32(&pf->hw, 
REG_LL_PROXY_H); + + /* Wake up threads waiting on low latency interface */ + params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS; + + wake_up_locked(¶ms->atqbal_wq); + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); /* When the bit is cleared, the TS is ready in the register */ - if (val & TS_LL_READ_TS) { + if (reg_ll_high & REG_LL_PROXY_H_EXEC) { dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); return; } /* High 8 bit value of the TS is on the bits 16:23 */ - raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val); - raw_tstamp <<= 32; - - /* Read the low 32 bit value */ - raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); + raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32; /* Devices using this interface always verify the timestamp differs * relative to the last cached timestamp value. @@ -1388,10 +1414,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) /** * ice_ptp_link_change - Reconfigure PTP after link status change * @pf: Board private structure - * @port: Port for which the PHY start is set * @linkup: Link is up or down */ -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { struct ice_ptp_port *ptp_port; struct ice_hw *hw = &pf->hw; @@ -1399,14 +1424,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) if (pf->ptp.state != ICE_PTP_READY) return; - if (WARN_ON_ONCE(port >= hw->ptp.num_lports)) - return; - ptp_port = &pf->ptp.port; - if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) - port *= 2; - if (WARN_ON_ONCE(ptp_port->port_num != port)) - return; /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; @@ -1566,18 +1584,29 @@ void ice_ptp_extts_event(struct ice_pf *pf) * Event is defined in GLTSYN_EVNT_0 register */ for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { + int pin_desc_idx; + /* Check if channel is enabled */ - if (pf->ptp.ext_ts_irq & (1 << chan)) { - lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); - hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); - event.timestamp = (((u64)hi) << 32) | lo; - event.type = PTP_CLOCK_EXTTS; - event.index = chan; - - /* Fire event */ - ptp_clock_event(pf->ptp.clock, &event); - pf->ptp.ext_ts_irq &= ~(1 << chan); + if (!(pf->ptp.ext_ts_irq & (1 << chan))) + continue; + + lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); + hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); + event.timestamp = (u64)hi << 32 | lo; + + /* Add delay compensation */ + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx >= 0) { + const struct ice_ptp_pin_desc *desc; + + desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; + event.timestamp -= desc->delay[0]; } + + event.type = PTP_CLOCK_EXTTS; + event.index = chan; + pf->ptp.ext_ts_irq &= ~(1 << chan); + ptp_clock_event(pf->ptp.clock, &event); } } @@ -1772,9 +1801,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, int on) { + unsigned int gpio_pin, prop_delay_ns; u64 clk, period, start, phase; struct ice_hw *hw = &pf->hw; - unsigned int gpio_pin; int pin_desc_idx; if (rq->flags & ~PTP_PEROUT_PHASE) @@ -1785,6 +1814,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, return -EIO; gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; + prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; /* If we're disabling the output or 
period is 0, clear out CLKO and TGT @@ -1816,11 +1846,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, * at the next multiple of period, maintaining phase. */ clk = ice_ptp_read_src_clk_reg(pf, NULL); - if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw)) + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) start = div64_u64(clk + period - 1, period) * period + phase; /* Compensate for propagation delay from the generator to the pin. */ - start -= ice_prop_delay(hw); + start -= prop_delay_ns; return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); } @@ -3080,7 +3110,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) /* Allocate a kworker for handling work required for the ports * connected to the PTP hardware clock. */ - kworker = kthread_create_worker(0, "ice-ptp-%s", + kworker = kthread_run_worker(0, "ice-ptp-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); @@ -3164,10 +3194,17 @@ void ice_ptp_init(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; - int err; + int lane_num, err; ptp->state = ICE_PTP_INITIALIZING; + lane_num = ice_get_phy_lane_number(hw); + if (lane_num < 0) { + err = lane_num; + goto err_exit; + } + + ptp->port.port_num = (u8)lane_num; ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); @@ -3188,10 +3225,6 @@ void ice_ptp_init(struct ice_pf *pf) if (err) goto err_exit; - ptp->port.port_num = hw->pf_id; - if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) - ptp->port.port_num = hw->pf_id * 2; - err = ice_ptp_init_port(pf, &ptp->port); if (err) goto err_exit; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 824e73b677a4..a1d0e988c084 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm { * struct ice_ptp_pin_desc - hardware pin description data * @name_idx: index of the name of pin in ice_pin_names * @gpio: the associated GPIO input and output pins + * @delay: input and output signal delays in nanoseconds * * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array * for the device. 
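/*
 * [Editorial sketch -- not part of the patch] With the output delay now
 * carried per pin in ice_ptp_pin_desc->delay[], the periodic-output start
 * time is aligned to the next period boundary and then pulled forward by
 * that pin's own propagation delay, roughly as in this hypothetical helper:
 */
static u64 example_perout_start(u64 clk, u64 period, u64 phase,
				unsigned int prop_delay_ns)
{
	/* next multiple of @period after @clk, keeping the requested phase */
	u64 start = div64_u64(clk + period - 1, period) * period + phase;

	/* compensate for generator-to-pin propagation delay */
	return start - prop_delay_ns;
}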
Device families have separate sets of available pins with @@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm { struct ice_ptp_pin_desc { int name_idx; int gpio[2]; + unsigned int delay[2]; }; /** @@ -310,7 +312,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); +void ice_ptp_link_change(struct ice_pf *pf, bool linkup); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) { @@ -358,7 +360,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf, } static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } -static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 585ce200c60f..ac46d1183300 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { .rx_offset = { .serdes = 0xffffeb27, /* -10.42424 */ .no_fec = 0xffffcccd, /* -25.6 */ - .fc = 0xfffe0014, /* -255.96 */ + .fc = 0xfffc557b, /* -469.26 */ .sfd = 0x4a4, /* 2.32 */ .bs_ds = 0x32 /* 0.0969697 */ } @@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 823437500, /* 823.4375 MHz PLL */ /* nominal_incval */ 0x136e44fabULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ @@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ @@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 796875000, /* 796.875 MHz */ /* nominal_incval */ 0x141414141ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ @@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 816000000, /* 816 MHz */ /* nominal_incval */ 0x139b9b9baULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ @@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 830078125, /* 830.78125 MHz */ /* nominal_incval */ 0x134679aceULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ @@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, }; @@ -761,9 +749,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* rx_desk_rsgb_par */ 644531250, /* 644.53125 MHz Reed Solomon gearbox */ /* tx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* rx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* tx_fixed_delay */ 1620, /* pmd_adj_divisor */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 518893f23372..ec91822e9280 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw24.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw24.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw23.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, /* Log the current clock configuration */ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.ts_pll_enable ? "enabled" : "disabled", + str_enabled_disabled(dw24.ts_pll_enable), ice_clk_src_str(dw23.time_ref_sel), ice_clk_freq_str(dw9.time_ref_freq_sel), ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); @@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) */ /** + * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number + * @hw: pointer to the HW struct + * @port: destination port + * + * Return: destination sideband queue PHY device. + */ +static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw, + u8 port) +{ + /* On a single complex E825, PHY 0 is always destination device phy_0 + * and PHY 1 is phy_0_peer. 
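/*
 * [Editorial sketch -- not part of the patch] With ports_per_phy == 4 on a
 * single-complex E825, the destination lookup below resolves lanes 0-3 to
 * eth56g_phy_0 and lanes 4-7 to eth56g_phy_1, so callers simply pass the
 * absolute port number, e.g.:
 */
static int example_read_rev_all_ports(struct ice_hw *hw)
{
	u8 port;

	for (port = 0; port < hw->ptp.num_lports; port++) {
		u32 rev;
		int err;

		/* routed to phy_0 or phy_0_peer based on the port number */
		err = ice_read_phy_eth56g(hw, port, PHY_REG_REVISION, &rev);
		if (err)
			return err;
	}

	return 0;
}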
+ */ + if (port >= hw->ptp.ports_per_phy) + return eth56g_phy_1; + else + return eth56g_phy_0; +} + +/** * ice_write_phy_eth56g - Write a PHY port register * @hw: pointer to the HW struct - * @phy_idx: PHY index + * @port: destination port * @addr: PHY register address * @val: Value to write * * Return: 0 on success, other error codes when failed to write to PHY */ -static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, - u32 val) +static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val) { - struct ice_sbq_msg_input phy_msg; + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_wr, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr), + .data = val + }; int err; - phy_msg.opcode = ice_sbq_msg_wr; - - phy_msg.msg_addr_low = lower_16_bits(addr); - phy_msg.msg_addr_high = upper_16_bits(addr); - - phy_msg.data = val; - phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; - - err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); - + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", err); @@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, /** * ice_read_phy_eth56g - Read a PHY port register * @hw: pointer to the HW struct - * @phy_idx: PHY index + * @port: destination port * @addr: PHY register address * @val: Value to write * * Return: 0 on success, other error codes when failed to read from PHY */ -static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, - u32 *val) +static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val) { - struct ice_sbq_msg_input phy_msg; + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_rd, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr) + }; int err; - phy_msg.opcode = ice_sbq_msg_rd; - - phy_msg.msg_addr_low = lower_16_bits(addr); - phy_msg.msg_addr_high = upper_16_bits(addr); - - phy_msg.data = 0; - phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; - - err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); - if (err) { + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); + if (err) ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", err); - return err; - } - - *val = phy_msg.data; + else + *val = msg.data; - return 0; + return err; } /** * ice_phy_res_address_eth56g - Calculate a PHY port register address - * @port: Port number to be written + * @hw: pointer to the HW struct + * @lane: Lane number to be written * @res_type: resource type (register/memory) * @offset: Offset from PHY port register base * @addr: The result address @@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, * * %0 - success * * %EINVAL - invalid port number or resource type */ -static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, - u32 offset, u32 *addr) +static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane, + enum eth56g_res_type res_type, + u32 offset, + u32 *addr) { - u8 lane = port % ICE_PORTS_PER_QUAD; - u8 phy = ICE_GET_QUAD_NUM(port); - if (res_type >= NUM_ETH56G_PHY_RES) return -EINVAL; - *addr = eth56g_phy_res[res_type].base[phy] + + /* Lanes 4..7 are in fact 0..3 on a second PHY */ + lane %= hw->ptp.ports_per_phy; + *addr = eth56g_phy_res[res_type].base[0] + lane * eth56g_phy_res[res_type].step + offset; + return 0; } @@ -1008,19 
+1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, u32 val, enum eth56g_res_type res_type) { - u8 phy_port = port % hw->ptp.ports_per_phy; - u8 phy_idx = port / hw->ptp.ports_per_phy; u32 addr; int err; if (port >= hw->ptp.num_lports) return -EINVAL; - err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); if (err) return err; - return ice_write_phy_eth56g(hw, phy_idx, addr, val); + return ice_write_phy_eth56g(hw, port, addr, val); } /** @@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, u32 *val, enum eth56g_res_type res_type) { - u8 phy_port = port % hw->ptp.ports_per_phy; - u8 phy_idx = port / hw->ptp.ports_per_phy; u32 addr; int err; if (port >= hw->ptp.num_lports) return -EINVAL; - err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); if (err) return err; - return ice_read_phy_eth56g(hw, phy_idx, addr, val); + return ice_read_phy_eth56g(hw, port, addr, val); } /** @@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, } /** + * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + + return ice_write_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to read + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 *val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + + return ice_read_phy_eth56g(hw, port, addr, val); +} + +/** * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 64bit register @@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li) */ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) { - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); u32 val; int err; @@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { case ICE_ETH56G_LNK_SPD_1G: case ICE_ETH56G_LNK_SPD_2_5G: - err = ice_read_ptp_reg_eth56g(hw, port_blk, - PHY_GPCS_CONFIG_REG0, &val); + err = ice_read_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d", err); @@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) val |= 
FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M, ICE_ETH56G_NOMINAL_TX_THRESH); - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_GPCS_CONFIG_REG0, val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d", err); @@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) */ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) { - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); - u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1); + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr, val, peer_delay; bool enable, sfd_ena; - u32 val, peer_delay; int err; enable = hw->ptp.phy.eth56g.onestep_ena; peer_delay = hw->ptp.phy.eth56g.peer_delay; sfd_ena = hw->ptp.phy.eth56g.sfd_ena; - /* PHY_PTP_1STEP_CONFIG */ - err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val); + addr = PHY_PTP_1STEP_CONFIG; + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val); if (err) return err; if (enable) - val |= blk_port; + val |= BIT(quad_lane); else - val &= ~blk_port; + val &= ~BIT(quad_lane); val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M); - err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; - /* PHY_PTP_1STEP_PEER_DELAY */ + addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane); val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay); if (peer_delay) val |= PHY_PTP_1STEP_PD_ADD_PD_M; val |= PHY_PTP_1STEP_PD_DLY_V_M; - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; val &= ~PHY_PTP_1STEP_PD_DLY_V_M; - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; - /* PHY_MAC_XIF_MODE */ - err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val); + addr = PHY_MAC_XIF_MODE; + err = ice_read_mac_reg_eth56g(hw, port, addr, &val); if (err) return err; @@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) | FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena); - return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val); + return ice_write_mac_reg_eth56g(hw, port, addr, val); } /** @@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs, bool fc, bool rs, enum ice_eth56g_link_spd spd) { - u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1); - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); u32 bitslip; int err; if (!bs || rs) return 0; - if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) + if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) { err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP, &bitslip); - else - err = ice_read_ptp_reg_eth56g(hw, port_blk, - PHY_REG_SD_BIT_SLIP(port_offset), - &bitslip); + } else { + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr; + + addr = PHY_REG_SD_BIT_SLIP(quad_lane); + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip); + } if (err) return 0; @@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port, } /** - * ice_is_muxed_topo - detect breakout 2x50G topology for E825C - * @hw: pointer to the HW struct - * - * Return: true if it's 2x50 breakout topology, false otherwise - 
*/ -static bool ice_is_muxed_topo(struct ice_hw *hw) -{ - u8 link_topo; - bool mux; - u32 val; - - val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); - mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val); - val = rd32(hw, GLGEN_MAC_LINK_TOPO); - link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val); - - return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS); -} - -/** - * ice_ptp_init_phy_e825c - initialize PHY parameters + * ice_ptp_init_phy_e825 - initialize PHY parameters * @hw: pointer to the HW struct */ -static void ice_ptp_init_phy_e825c(struct ice_hw *hw) +static void ice_ptp_init_phy_e825(struct ice_hw *hw) { struct ice_ptp_hw *ptp = &hw->ptp; struct ice_eth56g_params *params; - u8 phy; + u32 phy_rev; + int err; ptp->phy_model = ICE_PHY_ETH56G; params = &ptp->phy.eth56g; params->onestep_ena = false; params->peer_delay = 0; params->sfd_ena = false; - params->phy_addr[0] = eth56g_phy_0; - params->phy_addr[1] = eth56g_phy_1; params->num_phys = 2; ptp->ports_per_phy = 4; ptp->num_lports = params->num_phys * ptp->ports_per_phy; ice_sb_access_ena_eth56g(hw, true); - for (phy = 0; phy < params->num_phys; phy++) { - u32 phy_rev; - int err; - - err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev); - if (err || phy_rev != PHY_REVISION_ETH56G) { - ptp->phy_model = ICE_PHY_UNSUP; - return; - } - } - - ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw); + err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev); + if (err || phy_rev != PHY_REVISION_ETH56G) + ptp->phy_model = ICE_PHY_UNSUP; } /* E822 family functions @@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw, struct ice_sbq_msg_input *msg, u8 port, u16 offset) { - int phy_port, phy, quadtype; + int phy_port, quadtype; phy_port = port % hw->ptp.ports_per_phy; - phy = port / hw->ptp.ports_per_phy; quadtype = ICE_GET_QUAD_NUM(port) % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy); @@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw, msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); } - if (phy == 0) - msg->dest_dev = rmn_0; - else if (phy == 1) - msg->dest_dev = rmn_1; - else - msg->dest_dev = rmn_2; + msg->dest_dev = rmn_0; } /** @@ -4857,33 +4876,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) static int ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo) { + struct ice_e810_params *params = &hw->ptp.phy.e810; + unsigned long flags; u32 val; - u8 i; + int err; + + spin_lock_irqsave(&params->atqbal_wq.lock, flags); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irqrestore(&params->atqbal_wq.lock, flags); + return err; + } /* Write TS index to read to the PF register so the FW can read it */ - val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS; - wr32(hw, PF_SB_ATQBAL, val); + val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); /* Read the register repeatedly until the FW provides us the TS */ - for (i = TS_LL_READ_RETRIES; i > 0; i--) { - val = rd32(hw, PF_SB_ATQBAL); + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10, + REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); + spin_unlock_irqrestore(&params->atqbal_wq.lock, flags); + return err; + } - /* When the bit is 
cleared, the TS is ready in the register */ - if (!(FIELD_GET(TS_LL_READ_TS, val))) { - /* High 8 bit value of the TS is on the bits 16:23 */ - *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val); + /* High 8 bit value of the TS is on the bits 16:23 */ + *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val); - /* Read the low 32 bit value and set the TS valid bit */ - *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID; - return 0; - } + /* Read the low 32 bit value and set the TS valid bit */ + *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID; - udelay(10); - } + spin_unlock_irqrestore(&params->atqbal_wq.lock, flags); - /* FW failed to provide the TS in time */ - ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); - return -EINVAL; + return 0; } /** @@ -5066,6 +5098,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) } /** + * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment value to program + * + * Use the low latency firmware interface to program PHY time adjustment to + * all PHY ports. + * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(&params->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(&params->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, adj); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n"); + spin_unlock_irq(&params->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(&params->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment * @hw: pointer to HW struct * @adj: adjustment value to program @@ -5083,6 +5164,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_adj_ll_e810(hw, adj); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Adjustments are represented as signed 2's complement values in @@ -5106,6 +5190,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) } /** + * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Use the low latency firmware interface to program PHY time increment value + * for all PHY ports. 
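/*
 * [Editorial sketch -- not part of the patch] Every low latency proxy
 * command in this series follows the same handshake: payload into
 * REG_LL_PROXY_L, command plus REG_LL_PROXY_H_EXEC into REG_LL_PROXY_H,
 * then poll until firmware clears EXEC. Serialization against the Tx
 * timestamp interrupt (the atqbal_wq lock) is omitted here for brevity.
 */
static int example_ll_proxy_exec(struct ice_hw *hw, u32 payload, u32 cmd)
{
	u32 val;

	wr32(hw, REG_LL_PROXY_L, payload);
	wr32(hw, REG_LL_PROXY_H, cmd | REG_LL_PROXY_H_EXEC);

	/* poll every 10 us, up to REG_LL_PROXY_H_TIMEOUT_US in total */
	return read_poll_timeout_atomic(rd32, val,
					!FIELD_GET(REG_LL_PROXY_H_EXEC, val),
					10, REG_LL_PROXY_H_TIMEOUT_US, false,
					hw, REG_LL_PROXY_H);
}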
+ * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(&params->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(&params->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval)); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) | + FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n"); + spin_unlock_irq(&params->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(&params->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change * @hw: pointer to HW struct * @incval: The new 40bit increment value to prepare @@ -5120,6 +5254,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_incval_ll_e810(hw, incval); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; low = lower_32_bits(incval); high = upper_32_bits(incval); @@ -5404,6 +5541,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) ptp->phy_model = ICE_PHY_E810; ptp->num_lports = 8; ptp->ports_per_phy = 4; + + init_waitqueue_head(&ptp->phy.e810.atqbal_wq); } /* Device agnostic functions @@ -5478,7 +5617,7 @@ void ice_ptp_init_hw(struct ice_hw *hw) else if (ice_is_e810(hw)) ice_ptp_init_phy_e810(ptp); else if (ice_is_e825c(hw)) - ice_ptp_init_phy_e825c(hw); + ice_ptp_init_phy_e825(hw); else ptp->phy_model = ICE_PHY_UNSUP; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 1cee0f1bba2d..6779ce120515 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g { * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L - * @pps_delay: propagation delay of the PPS output signal * * Characteristic information for the various TIME_REF sources possible in the * E822 devices @@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g { struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; - u8 pps_delay; }; /** @@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; */ #define ICE_E810_PLL_FREQ 812500000 #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL -#define ICE_E810_OUT_PROP_DELAY_NS 1 -#define ICE_E825C_OUT_PROP_DELAY_NS 11 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { return e82x_time_ref[time_ref].nominal_incval; } -static 
inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) -{ - return e82x_time_ref[time_ref].pps_delay; -} - /* E822 Vernier calibration functions */ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); @@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); #define ICE_ETH56G_NOMINAL_THRESH4 0x7777 #define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 -static inline u64 ice_prop_delay(const struct ice_hw *hw) -{ - switch (hw->ptp.phy_model) { - case ICE_PHY_ETH56G: - return ICE_E825C_OUT_PROP_DELAY_NS; - case ICE_PHY_E810: - return ICE_E810_OUT_PROP_DELAY_NS; - case ICE_PHY_E82X: - return ice_e82x_pps_delay(ice_e82x_time_ref(hw)); - default: - return 0; - } -} - /** * ice_get_base_incval - Get base clock increment value * @hw: pointer to the HW struct @@ -689,11 +666,18 @@ static inline bool ice_is_dual(struct ice_hw *hw) #define BYTES_PER_IDX_ADDR_L 4 /* Tx timestamp low latency read definitions */ -#define TS_LL_READ_RETRIES 200 -#define TS_LL_READ_TS_HIGH GENMASK(23, 16) -#define TS_LL_READ_TS_IDX GENMASK(29, 24) -#define TS_LL_READ_TS_INTR BIT(30) -#define TS_LL_READ_TS BIT(31) +#define REG_LL_PROXY_H_TIMEOUT_US 2000 +#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6) +#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1 +#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2 +#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16) +#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24) +#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24) +#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30) +#define REG_LL_PROXY_H_EXEC BIT(31) + +#define REG_LL_PROXY_L PF_SB_ATQBAH +#define REG_LL_PROXY_H PF_SB_ATQBAL /* Internal PHY timestamp address */ #define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U)) diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 970a99a52bf1..fb7a1b9a4313 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -4,7 +4,7 @@ #include "ice.h" #include "ice_eswitch.h" #include "devlink/devlink.h" -#include "devlink/devlink_port.h" +#include "devlink/port.h" #include "ice_sriov.h" #include "ice_tc_lib.h" #include "ice_dcb_lib.h" diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c index 75d7147e1c01..1a2c94375ca7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c @@ -5,8 +5,8 @@ #include "ice_txrx.h" #include "ice_fltr.h" #include "ice_sf_eth.h" -#include "devlink/devlink_port.h" #include "devlink/devlink.h" +#include "devlink/port.h" static const struct net_device_ops ice_sf_netdev_ops = { .ndo_open = ice_open, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 0e740342e294..4a91e0aaf0a5 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, */ if (found && recp[i].tun_type == rinfo->tun_type && recp[i].need_pass_l2 == rinfo->need_pass_l2 && - recp[i].allow_pass_l2 == rinfo->allow_pass_l2) + recp[i].allow_pass_l2 == rinfo->allow_pass_l2 && + recp[i].priority == rinfo->priority) return i; /* Return the recipe ID */ } } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 5d2d7736fd5f..9c9ea4c1b93b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ 
b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -527,15 +527,14 @@ err: * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action - * @rx_buf: Rx buffer to store the XDP action * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ -static void +static u32 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) + union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act; @@ -574,7 +573,7 @@ out_failure: ret = ICE_XDP_CONSUMED; } exit: - ice_set_rx_bufs_act(xdp, rx_ring, ret); + return ret; } /** @@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, xdp_buff_set_frags_flag(xdp); } - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) return -ENOMEM; - } __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, rx_buf->page_offset, size); @@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, struct ice_rx_buf *rx_buf; rx_buf = &rx_ring->rx_buf[ntc]; - rx_buf->pgcnt = page_count(rx_buf->page); prefetchw(rx_buf->page); if (!size) @@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, } /** + * ice_get_pgcnts - grab page_count() for gathered fragments + * @rx_ring: Rx descriptor ring to store the page counts on + * + * This function is intended to be called right before running XDP + * program so that the page recycling mechanism will be able to take + * a correct decision regarding underlying pages; this is done in such + * way as XDP program can change the refcount of page + */ +static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + struct ice_rx_buf *rx_buf; + u32 cnt = rx_ring->count; + + for (int i = 0; i < nr_frags; i++) { + rx_buf = &rx_ring->rx_buf[idx]; + rx_buf->pgcnt = page_count(rx_buf->page); + + if (++idx == cnt) + idx = 0; + } +} + +/** * ice_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data @@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) rx_buf->page_offset + headlen, size, xdp->frame_sz); } else { - /* buffer is unused, change the act that should be taken later - * on; data was copied onto skb's linear part so there's no + /* buffer is unused, restore biased page count in Rx buffer; + * data was copied onto skb's linear part so there's no * need for adjusting page offset and we can reuse this buffer * as-is */ - rx_buf->act = ICE_SKB_CONSUMED; + rx_buf->pagecnt_bias++; } if (unlikely(xdp_buff_has_frags(xdp))) { @@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) } /** + * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags + * @rx_ring: Rx ring with all the auxiliary data + * @xdp: XDP buffer carrying linear + frags part + * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage + * @ntc: a current next_to_clean value to be stored at rx_ring + * @verdict: return code from XDP program execution + * + * Walk through gathered fragments and satisfy internal page + * recycle mechanism; we take here an action related to verdict + * 
returned by XDP program; + */ +static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + u32 *xdp_xmit, u32 ntc, u32 verdict) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + u32 cnt = rx_ring->count; + u32 post_xdp_frags = 1; + struct ice_rx_buf *buf; + int i; + + if (unlikely(xdp_buff_has_frags(xdp))) + post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; + + for (i = 0; i < post_xdp_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + + if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + *xdp_xmit |= verdict; + } else if (verdict & ICE_XDP_CONSUMED) { + buf->pagecnt_bias++; + } else if (verdict == ICE_XDP_PASS) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + } + + ice_put_rx_buf(rx_ring, buf); + + if (++idx == cnt) + idx = 0; + } + /* handle buffers that represented frags released by XDP prog; + * for these we keep pagecnt_bias as-is; refcount from struct page + * has been decremented within XDP prog and we do not have to increase + * the biased refcnt + */ + for (; i < nr_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + ice_put_rx_buf(rx_ring, buf); + if (++idx == cnt) + idx = 0; + } + + xdp->data = NULL; + rx_ring->first_desc = ntc; + rx_ring->nr_frags = 0; +} + +/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; - u32 cached_ntc = rx_ring->first_desc; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; - u32 cached_ntu; bool failure; - u32 first; xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) { @@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); xdp_buff_clear_frags_flag(xdp); } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { + ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); break; } if (++ntc == cnt) @@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); - if (rx_buf->act == ICE_XDP_PASS) + ice_get_pgcnts(rx_ring); + xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); + if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); total_rx_pkts++; - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + continue; construct_skb: if (likely(ice_ring_uses_build_skb(rx_ring))) @@ -1217,18 +1296,12 @@ construct_skb: /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->ring_stats->rx_stats.alloc_page_failed++; - rx_buf->act = ICE_XDP_CONSUMED; - if (unlikely(xdp_buff_has_frags(xdp))) - ice_set_rx_bufs_act(xdp, rx_ring, - ICE_XDP_CONSUMED); - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; - break; + xdp_verdict = ICE_XDP_CONSUMED; } - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + + if (!skb) + 
break; stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, @@ -1257,23 +1330,6 @@ construct_skb: total_rx_pkts++; } - first = rx_ring->first_desc; - while (cached_ntc != first) { - struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; - - if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - xdp_xmit |= buf->act; - } else if (buf->act & ICE_XDP_CONSUMED) { - buf->pagecnt_bias++; - } else if (buf->act == ICE_XDP_PASS) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - } - - ice_put_rx_buf(rx_ring, buf); - if (++cached_ntc >= cnt) - cached_ntc = 0; - } rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index cb347c852ba9..806bce701df3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -201,7 +201,6 @@ struct ice_rx_buf { struct page *page; unsigned int page_offset; unsigned int pgcnt; - unsigned int act; unsigned int pagecnt_bias; }; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 79f960c6680d..6cf32b404127 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -6,49 +6,6 @@ #include "ice.h" /** - * ice_set_rx_bufs_act - propagate Rx buffer action to frags - * @xdp: XDP buffer representing frame (linear and frags part) - * @rx_ring: Rx ring struct - * act: action to store onto Rx buffers related to XDP buffer parts - * - * Set action that should be taken before putting Rx buffer from first frag - * to the last. 
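/*
 * [Editorial sketch -- not part of the patch] The helper removed below is
 * superseded by the flow added in ice_txrx.c: page counts are snapshotted
 * before the XDP program runs and the single per-frame verdict is applied
 * to every fragment afterwards. Condensed, with error handling and stats
 * omitted:
 */
static struct sk_buff *example_rx_frame_flow(struct ice_rx_ring *rx_ring,
					     struct xdp_buff *xdp,
					     struct bpf_prog *xdp_prog,
					     struct ice_tx_ring *xdp_ring,
					     union ice_32b_rx_flex_desc *rx_desc,
					     u32 *xdp_xmit, u32 ntc)
{
	struct sk_buff *skb = NULL;
	u32 verdict;

	ice_get_pgcnts(rx_ring);	/* snapshot page_count() per buffer */
	verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
	if (verdict == ICE_XDP_PASS)
		skb = ice_build_skb(rx_ring, xdp);

	/* recycle or keep buffers according to the one verdict */
	ice_put_rx_mbuf(rx_ring, xdp, xdp_xmit, ntc, verdict);

	return skb;
}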
- */ -static inline void -ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, - const unsigned int act) -{ - u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - u32 nr_frags = rx_ring->nr_frags + 1; - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - - for (int i = 0; i < nr_frags; i++) { - buf = &rx_ring->rx_buf[idx]; - buf->act = act; - - if (++idx == cnt) - idx = 0; - } - - /* adjust pagecnt_bias on frags freed by XDP prog */ - if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { - u32 delta = rx_ring->nr_frags - sinfo_frags; - - while (delta) { - if (idx == 0) - idx = cnt - 1; - else - idx--; - buf = &rx_ring->rx_buf[idx]; - buf->pagecnt_bias--; - delta--; - } - } -} - -/** * ice_test_staterr - tests bits in Rx descriptor status and error fields * @status_err_n: Rx descriptor status_error0 or status_error1 bits * @stat_err_bits: value to mask diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index adb168860711..33a1a5934c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -18,6 +18,7 @@ #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" #include "ice_fwlog.h" +#include <linux/wait.h> static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -368,6 +369,7 @@ struct ice_ts_func_info { #define ICE_TS_TMR1_ENA_M BIT(26) #define ICE_TS_LL_TX_TS_READ_M BIT(28) #define ICE_TS_LL_TX_TS_INT_READ_M BIT(29) +#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30) struct ice_ts_dev_info { /* Device specific info */ @@ -382,6 +384,7 @@ struct ice_ts_dev_info { u8 tmr1_ena; u8 ts_ll_read; u8 ts_ll_int_read; + u8 ll_phy_tmr_update; }; #define ICE_NAC_TOPO_PRIMARY_M BIT(0) @@ -848,15 +851,23 @@ struct ice_mbx_data { #define ICE_PORTS_PER_QUAD 4 #define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD) +#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0) + +struct ice_e810_params { + /* The wait queue lock also protects the low latency interface */ + wait_queue_head_t atqbal_wq; + unsigned int atqbal_flags; +}; + struct ice_eth56g_params { u8 num_phys; - u8 phy_addr[2]; bool onestep_ena; bool sfd_ena; u32 peer_delay; }; union ice_phy_params { + struct ice_e810_params e810; struct ice_eth56g_params eth56g; }; @@ -881,7 +892,6 @@ struct ice_ptp_hw { union ice_phy_params phy; u8 num_lports; u8 ports_per_phy; - bool is_2x50g_muxed_topo; }; /* Port hardware description */ @@ -1216,4 +1226,9 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 +/* AQ API version for Health Status support */ +#define ICE_FW_API_HEALTH_REPORT_MAJ 1 +#define ICE_FW_API_HEALTH_REPORT_MIN 7 +#define ICE_FW_API_HEALTH_REPORT_PATCH 6 + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 334ae945d640..8975d2971bc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -801,35 +801,6 @@ out_failure: return result; } -static int -ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first, - struct xdp_buff *xdp, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(first)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(first); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - xsk_buff_free(first); - 
return -ENOMEM; - } - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, - virt_to_page(xdp->data_hard_start), - XDP_PACKET_HEADROOM, size); - sinfo->xdp_frags_size += size; - xsk_buff_add_frag(xdp); - - return 0; -} - /** * ice_clean_rx_irq_zc - consumes packets from the hardware ring * @rx_ring: AF_XDP Rx ring @@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, if (!first) { first = xdp; - } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { + } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { + xsk_buff_free(first); break; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index 4849590a5591..b28991dd1870 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD)) break; + /* Ensure no other fields are read until DD flag is checked */ + dma_rmb(); + /* strip off FW internal code */ desc_err = le16_to_cpu(desc->ret_val) & 0xff; @@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, if (!(flags & IDPF_CTLQ_FLAG_DD)) break; + /* Ensure no other fields are read until DD flag is checked */ + dma_rmb(); + q_msg[i].vmvf_type = (flags & (IDPF_CTLQ_FLAG_FTYPE_VM | IDPF_CTLQ_FLAG_FTYPE_PF)) >> diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 6c913a703df6..41e4bd49402a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -101,6 +101,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S; intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S; intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M; + intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M; + intr->dyn_ctl_sw_itridx_ena_m = + PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_PF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index b4fbb99bfad2..a3d6b8f198a8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + err = idpf_set_real_num_queues(vport); + if (err) + goto unlock; + err = idpf_vport_open(vport); +unlock: idpf_vport_ctrl_unlock(netdev); return err; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index f71d3182580b..b6c515d14cbf 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_set_drvdata(pdev, adapter); - adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0, + adapter->init_wq = alloc_workqueue("%s-%s-init", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->init_wq) { @@ -183,7 +184,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free; } - adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0, + adapter->serv_wq = alloc_workqueue("%s-%s-service", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), 
dev_name(dev)); if (!adapter->serv_wq) { @@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_serv_wq_alloc; } - adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0, + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->mbx_wq) { @@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_mbx_wq_alloc; } - adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0, + adapter->stats_wq = alloc_workqueue("%s-%s-stats", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->stats_wq) { @@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_stats_wq_alloc; } - adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0, + adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->vc_event_wq) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 34f4118c7bc0..9be6a6b59c4e 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3008,8 +3008,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, return -EINVAL; rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); - if (unlikely(rsc_segments == 1)) - return 0; NAPI_GRO_CB(skb)->count = rsc_segments; skb_shinfo(skb)->gso_size = rsc_seg_len; @@ -3072,6 +3070,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, idpf_rx_hash(rxq, skb, rx_desc, decoded); skb->protocol = eth_type_trans(skb, rxq->netdev); + skb_record_rx_queue(skb, rxq->idx); if (le16_get_bits(rx_desc->hdrlen_flags, VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) @@ -3080,8 +3079,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc); idpf_rx_csum(rxq, skb, csum_bits, decoded); - skb_record_rx_queue(skb, rxq->idx); - return 0; } @@ -3604,21 +3601,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) /** * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings * @q_vector: pointer to q_vector - * @type: itr index - * @itr: itr value */ -static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, - const int type, u16 itr) +static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector) { - u32 itr_val; + u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m; + int type = IDPF_NO_ITR_UPDATE_IDX; + u16 itr = 0; + + if (q_vector->wb_on_itr) { + /* + * Trigger a software interrupt when exiting wb_on_itr, to make + * sure we catch any pending write backs that might have been + * missed due to interrupt state transition. 
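/*
 * [Editorial sketch -- not part of the patch] With the new SW-interrupt
 * masks added to struct idpf_intr_reg, the value written to DYN_CTL when
 * leaving wb_on_itr is composed roughly as below (field and macro names as
 * introduced by this patch):
 */
static u32 example_swint_dyn_ctl(const struct idpf_q_vector *q_vector)
{
	const struct idpf_intr_reg *reg = &q_vector->intr_reg;
	u16 itr = IDPF_ITR_20K & IDPF_ITR_MASK;

	return reg->dyn_ctl_intena_m | reg->dyn_ctl_swint_trig_m |
	       reg->dyn_ctl_sw_itridx_ena_m |
	       (IDPF_SW_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s) |
	       (itr << (reg->dyn_ctl_intrvl_s - 1));
}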
+ */ + itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m | + q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m; + type = IDPF_SW_ITR_UPDATE_IDX; + itr = IDPF_ITR_20K; + } itr &= IDPF_ITR_MASK; /* Don't clear PBA because that can cause lost interrupts that * came in while we were cleaning/polling */ - itr_val = q_vector->intr_reg.dyn_ctl_intena_m | - (type << q_vector->intr_reg.dyn_ctl_itridx_s) | - (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); + itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) | + (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); return itr_val; } @@ -3716,9 +3723,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) /* net_dim() updates ITR out-of-band using a work item */ idpf_net_dim(q_vector); + intval = idpf_vport_intr_buildreg_itr(q_vector); q_vector->wb_on_itr = false; - intval = idpf_vport_intr_buildreg_itr(q_vector, - IDPF_NO_ITR_UPDATE_IDX, 0); writel(intval, q_vector->intr_reg.dyn_ctl); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 9c1fe84108ed..0f71a6f5557b 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -354,6 +354,8 @@ struct idpf_vec_regs { * @dyn_ctl_itridx_m: Mask for ITR index * @dyn_ctl_intrvl_s: Register bit offset for ITR interval * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature + * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index + * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable * @rx_itr: RX ITR register * @tx_itr: TX ITR register * @icr_ena: Interrupt cause register offset @@ -367,6 +369,8 @@ struct idpf_intr_reg { u32 dyn_ctl_itridx_m; u32 dyn_ctl_intrvl_s; u32 dyn_ctl_wb_on_itr_m; + u32 dyn_ctl_sw_itridx_ena_m; + u32 dyn_ctl_swint_trig_m; void __iomem *rx_itr; void __iomem *tx_itr; void __iomem *icr_ena; @@ -437,7 +441,7 @@ struct idpf_q_vector { cpumask_var_t affinity_mask; __cacheline_group_end_aligned(cold); }; -libeth_cacheline_set_assert(struct idpf_q_vector, 112, +libeth_cacheline_set_assert(struct idpf_q_vector, 120, 24 + sizeof(struct napi_struct) + 2 * sizeof(struct dim), 8 + sizeof(cpumask_var_t)); @@ -471,6 +475,8 @@ struct idpf_tx_queue_stats { #define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode) #define IDPF_ITR_TX_DEF IDPF_ITR_20K #define IDPF_ITR_RX_DEF IDPF_ITR_20K +/* Index used for 'SW ITR' update in DYN_CTL register */ +#define IDPF_SW_ITR_UPDATE_IDX 2 /* Index used for 'No ITR' update in DYN_CTL register */ #define IDPF_NO_ITR_UPDATE_IDX 3 #define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? 
spacing : dflt) diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index aad62e270ae4..aba828abcb17 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S; intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M; + intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M; + intr->dyn_ctl_sw_itridx_ena_m = + VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M; spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_VF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index d46c95f91b0d..3d2413b8684f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -517,8 +517,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, retval = -ENXIO; goto only_unlock; case IDPF_VC_XN_WAITING: - dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n", - params->vc_op, params->timeout_ms); + dev_notice_ratelimited(&adapter->pdev->dev, + "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n", + params->vc_op, cookie, xn->vc_op, + xn->salt, params->timeout_ms); retval = -ETIME; break; case IDPF_VC_XN_COMPLETED_SUCCESS: @@ -612,14 +614,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, return -EINVAL; } xn = &adapter->vcxn_mngr->ring[xn_idx]; + idpf_vc_xn_lock(xn); salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); if (xn->salt != salt) { - dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n", - xn->salt, salt); + dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n", + xn->vc_op, xn->salt, xn->state, + ctlq_msg->cookie.mbx.chnl_opcode, salt); + idpf_vc_xn_unlock(xn); return -EINVAL; } - idpf_vc_xn_lock(xn); switch (xn->state) { case IDPF_VC_XN_WAITING: /* success */ @@ -3077,12 +3081,21 @@ init_failed: */ void idpf_vc_core_deinit(struct idpf_adapter *adapter) { + bool remove_in_prog; + if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) return; + /* Avoid transaction timeouts when called during reset */ + remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); + if (!remove_in_prog) + idpf_vc_xn_shutdown(adapter->vcxn_mngr); + idpf_deinit_task(adapter); idpf_intr_rel(adapter); - idpf_vc_xn_shutdown(adapter->vcxn_mngr); + + if (remove_in_prog) + idpf_vc_xn_shutdown(adapter->vcxn_mngr); cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->mbx_task); diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 463c0d26b9d4..6c1b702fd992 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o igb-y := igb_main.o igb_ethtool.o e1000_82575.o \ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ - e1000_i210.o igb_ptp.o igb_hwmon.o + e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 3c2dc7bdebb5..02f340280d20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -18,8 +18,10 @@ #include <linux/i2c-algo-bit.h> #include <linux/pci.h> #include 
<linux/mdio.h> +#include <linux/lockdep.h> #include <net/xdp.h> +#include <net/xdp_sock_drv.h> struct igb_adapter; @@ -86,6 +88,7 @@ struct igb_adapter; #define IGB_XDP_CONSUMED BIT(0) #define IGB_XDP_TX BIT(1) #define IGB_XDP_REDIR BIT(2) +#define IGB_XDP_EXIT BIT(3) struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; @@ -255,6 +258,7 @@ enum igb_tx_flags { enum igb_tx_buf_type { IGB_TYPE_SKB = 0, IGB_TYPE_XDP, + IGB_TYPE_XSK }; /* wrapper around a pointer to a socket buffer, @@ -320,6 +324,7 @@ struct igb_ring { union { /* array of buffer info structs */ struct igb_tx_buffer *tx_buffer_info; struct igb_rx_buffer *rx_buffer_info; + struct xdp_buff **rx_buffer_info_zc; }; void *desc; /* descriptor ring memory */ unsigned long flags; /* ring specific flags */ @@ -357,6 +362,7 @@ struct igb_ring { }; }; struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; } ____cacheline_internodealigned_in_smp; struct igb_q_vector { @@ -384,7 +390,8 @@ enum e1000_ring_flags_t { IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, IGB_RING_FLAG_TX_CTX_IDX, - IGB_RING_FLAG_TX_DETECT_HANG + IGB_RING_FLAG_TX_DETECT_HANG, + IGB_RING_FLAG_TX_DISABLED }; #define ring_uses_large_buffer(ring) \ @@ -731,12 +738,21 @@ int igb_setup_tx_resources(struct igb_ring *); int igb_setup_rx_resources(struct igb_ring *); void igb_free_tx_resources(struct igb_ring *); void igb_free_rx_resources(struct igb_ring *); +void igb_clean_tx_ring(struct igb_ring *tx_ring); +void igb_clean_rx_ring(struct igb_ring *rx_ring); void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status); +void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets, + unsigned int bytes); void igb_setup_tctl(struct igb_adapter *); void igb_setup_rctl(struct igb_adapter *); void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp); +void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb); void igb_alloc_rx_buffers(struct igb_ring *, u16); void igb_update_stats(struct igb_adapter *); bool igb_has_link(struct igb_adapter *adapter); @@ -797,6 +813,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); } +/* This function assumes __netif_tx_lock is held by the caller. */ +static inline void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + lockdep_assert_held(&txring_txq(ring)->_xmit_lock); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter) +{ + return !!READ_ONCE(adapter->xdp_prog); +} + int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input); int igb_erase_filter(struct igb_adapter *adapter, @@ -807,4 +850,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter, int igb_del_mac_steering_filter(struct igb_adapter *adapter, const u8 *addr, u8 queue, u8 flags); +struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter, + struct igb_ring *ring); +int igb_xsk_pool_setup(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid); +bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count); +void igb_clean_rx_ring_zc(struct igb_ring *rx_ring); +int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, + struct xsk_buff_pool *xsk_pool, const int budget); +bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool); +int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 288a4bb2683a..d368b753a467 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -33,7 +33,6 @@ #include <linux/bpf_trace.h> #include <linux/pm_runtime.h> #include <linux/etherdevice.h> -#include <linux/lockdep.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif @@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); -static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(struct timer_list *); static void igb_watchdog(struct timer_list *); @@ -475,12 +472,17 @@ rx_ring_summary: for (i = 0; i < rx_ring->count; i++) { const char *next_desc; - struct igb_rx_buffer *buffer_info; - buffer_info = &rx_ring->rx_buffer_info[i]; + dma_addr_t dma = (dma_addr_t)0; + struct igb_rx_buffer *buffer_info = NULL; rx_desc = IGB_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (!rx_ring->xsk_pool) { + buffer_info = &rx_ring->rx_buffer_info[i]; + dma = buffer_info->dma; + } + if (i == rx_ring->next_to_use) next_desc = " NTU"; else if (i == rx_ring->next_to_clean) @@ -500,11 +502,11 @@ rx_ring_summary: "R ", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, + (u64)dma, next_desc); if (netif_msg_pktdata(adapter) && - buffer_info->dma && buffer_info->page) { + buffer_info && dma && buffer_info->page) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, @@ -1990,7 +1992,11 @@ static void igb_configure(struct igb_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; - igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + if (ring->xsk_pool) + igb_alloc_rx_buffers_zc(ring, ring->xsk_pool, + igb_desc_unused(ring)); + else + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); } } @@ 
-2911,37 +2917,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { + struct igb_adapter *adapter = netdev_priv(dev); + switch (xdp->command) { case XDP_SETUP_PROG: return igb_xdp_setup(dev, xdp); + case XDP_SETUP_XSK_POOL: + return igb_xsk_pool_setup(adapter, xdp->xsk.pool, + xdp->xsk.queue_id); default: return -EINVAL; } } -/* This function assumes __netif_tx_lock is held by the caller. */ -static void igb_xdp_ring_update_tail(struct igb_ring *ring) -{ - lockdep_assert_held(&txring_txq(ring)->_xmit_lock); - - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. - */ - wmb(); - writel(ring->next_to_use, ring->tail); -} - -static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) -{ - unsigned int r_idx = smp_processor_id(); - - if (r_idx >= adapter->num_tx_queues) - r_idx = r_idx % adapter->num_tx_queues; - - return adapter->tx_ring[r_idx]; -} - -static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); int cpu = smp_processor_id(); @@ -2955,7 +2944,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? + igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return IGB_XDP_CONSUMED; @@ -2988,10 +2978,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n, /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + tx_ring = igb_xdp_is_enabled(adapter) ? 
+ igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) return -ENXIO; + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return -ENXIO; + nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); @@ -3042,6 +3036,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_setup_tc = igb_setup_tc, .ndo_bpf = igb_xdp, .ndo_xdp_xmit = igb_xdp_xmit, + .ndo_xsk_wakeup = igb_xsk_wakeup, }; /** @@ -3338,7 +3333,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -4364,6 +4360,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -4424,7 +4422,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index, 0); + rx_ring->queue_index, + rx_ring->q_vector->napi.napi_id); if (res < 0) { dev_err(dev, "Failed to register xdp_rxq index %u\n", rx_ring->queue_index); @@ -4720,12 +4719,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) struct e1000_hw *hw = &adapter->hw; int reg_idx = ring->reg_idx; u32 srrctl = 0; + u32 buf_size; - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - if (ring_uses_large_buffer(ring)) - srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGB_RXBUFFER_3072; else - srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + buf_size = IGB_RXBUFFER_2048; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; @@ -4757,8 +4761,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, u32 rxdctl = 0; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL)); + WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } /* disable the queue */ wr32(E1000_RXDCTL(reg_idx), 0); @@ -4785,9 +4798,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; - /* initialize rx_buffer_info */ - memset(ring->rx_buffer_info, 0, - sizeof(struct igb_rx_buffer) * ring->count); + if (ring->xsk_pool) + memset(ring->rx_buffer_info_zc, 0, + sizeof(*ring->rx_buffer_info_zc) * ring->count); + else + memset(ring->rx_buffer_info, 0, + sizeof(*ring->rx_buffer_info) * ring->count); /* initialize Rx descriptor 0 */ rx_desc = IGB_RX_DESC(ring, 0); @@ -4888,19 +4904,24 @@ static void 
igb_free_all_tx_resources(struct igb_adapter *adapter) * igb_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ -static void igb_clean_tx_ring(struct igb_ring *tx_ring) +void igb_clean_tx_ring(struct igb_ring *tx_ring) { u16 i = tx_ring->next_to_clean; struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs or xdp frames */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { dev_kfree_skb_any(tx_buffer->skb); - else + } else if (tx_buffer->type == IGB_TYPE_XDP) { xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -4931,6 +4952,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } +skip_for_xsk: tx_buffer->next_to_watch = NULL; /* move us one more past the eop_desc for start of next pkt */ @@ -4945,6 +4967,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -4975,8 +5000,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) rx_ring->xdp_prog = NULL; xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; + if (rx_ring->xsk_pool) { + vfree(rx_ring->rx_buffer_info_zc); + rx_ring->rx_buffer_info_zc = NULL; + } else { + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + } /* if not set, then don't free */ if (!rx_ring->desc) @@ -5007,13 +5037,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * igb_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +void igb_clean_rx_ring(struct igb_ring *rx_ring) { u16 i = rx_ring->next_to_clean; dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; + if (rx_ring->xsk_pool) { + igb_clean_rx_ring_zc(rx_ring); + goto skip_for_xsk; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; @@ -5041,6 +5076,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) i = 0; } +skip_for_xsk: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -6467,6 +6503,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) + return NETDEV_TX_BUSY; + /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGB_TYPE_SKB; @@ -6622,7 +6661,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) struct igb_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; - if (adapter->xdp_prog) { + if (igb_xdp_is_enabled(adapter)) { int i; for (i = 0; i < adapter->num_rx_queues; i++) { @@ -8195,6 +8234,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi); + struct xsk_buff_pool *xsk_pool; bool clean_complete = true; int work_done = 0; @@ -8206,7 +8246,12 @@ static int igb_poll(struct 
napi_struct *napi, int budget) clean_complete = igb_clean_tx_irq(q_vector, budget); if (q_vector->rx.ring) { - int cleaned = igb_clean_rx_irq(q_vector, budget); + int cleaned; + + xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool); + cleaned = xsk_pool ? + igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) : + igb_clean_rx_irq(q_vector, budget); work_done += cleaned; if (cleaned >= budget) @@ -8235,13 +8280,18 @@ static int igb_poll(struct napi_struct *napi, int budget) **/ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) { - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *tx_ring = q_vector->tx.ring; - struct igb_tx_buffer *tx_buffer; - union e1000_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; + struct igb_adapter *adapter = q_vector->adapter; unsigned int budget = q_vector->tx.work_limit; + struct igb_ring *tx_ring = q_vector->tx.ring; unsigned int i = tx_ring->next_to_clean; + union e1000_adv_tx_desc *tx_desc; + struct igb_tx_buffer *tx_buffer; + struct xsk_buff_pool *xsk_pool; + int cpu = smp_processor_id(); + bool xsk_xmit_done = true; + struct netdev_queue *nq; + u32 xsk_frames = 0; if (test_bit(__IGB_DOWN, &adapter->state)) return true; @@ -8272,10 +8322,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { napi_consume_skb(tx_buffer->skb, napi_budget); - else + } else if (tx_buffer->type == IGB_TYPE_XDP) { xdp_return_frame(tx_buffer->xdpf); + } else if (tx_buffer->type == IGB_TYPE_XSK) { + xsk_frames++; + goto skip_for_xsk; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -8307,6 +8361,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } +skip_for_xsk: /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -8335,6 +8390,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + xsk_pool = READ_ONCE(tx_ring->xsk_pool); + if (xsk_pool) { + if (xsk_frames) + xsk_tx_completed(xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool); + __netif_tx_unlock(nq); + } + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8397,7 +8467,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } } - return !!budget; + return !!budget && xsk_xmit_done; } /** @@ -8588,9 +8658,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, return skb; } -static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, - struct igb_ring *rx_ring, - struct xdp_buff *xdp) +static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring, + struct xdp_buff *xdp) { int err, result = IGB_XDP_PASS; struct bpf_prog *xdp_prog; @@ -8630,7 +8699,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, @@ -8756,10 +8825,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use 
error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely((igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { struct net_device *netdev = rx_ring->netdev; @@ -8786,9 +8851,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void igb_process_skb_fields(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; @@ -8870,6 +8935,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring, rx_buffer->page = NULL; } +void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + + if (status & IGB_XDP_REDIR) + xdp_do_flush(); + + if (status & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + igb_xdp_ring_update_tail(tx_ring); + __netif_tx_unlock(nq); + } +} + +void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets, + unsigned int bytes) +{ + struct igb_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { unsigned int total_bytes = 0, total_packets = 0; @@ -8877,12 +8974,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) struct igb_ring *rx_ring = q_vector->rx.ring; u16 cleaned_count = igb_desc_unused(rx_ring); struct sk_buff *skb = rx_ring->skb; - int cpu = smp_processor_id(); unsigned int xdp_xmit = 0; - struct netdev_queue *nq; struct xdp_buff xdp; u32 frame_sz = 0; int rx_buf_pgcnt; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8940,12 +9036,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); #endif - skb = igb_run_xdp(adapter, rx_ring, &xdp); + xdp_res = igb_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { xdp_xmit |= xdp_res; igb_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -8964,7 +9058,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) &xdp, timestamp); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; break; @@ -8978,7 +9072,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -9001,24 +9095,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; - if (xdp_xmit & IGB_XDP_REDIR) - xdp_do_flush(); - - if (xdp_xmit & IGB_XDP_TX) { - struct igb_ring *tx_ring = 
igb_xdp_tx_queue_mapping(adapter); - - nq = txring_txq(tx_ring); - __netif_tx_lock(nq, cpu); - igb_xdp_ring_update_tail(tx_ring); - __netif_tx_unlock(nq); - } + if (xdp_xmit) + igb_finalize_xdp(adapter, xdp_xmit); - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; - u64_stats_update_end(&rx_ring->rx_syncp); - q_vector->rx.total_packets += total_packets; - q_vector->rx.total_bytes += total_bytes; + igb_update_rx_stats(q_vector, total_packets, total_bytes); if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c new file mode 100644 index 000000000000..157d43787fa0 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_xsk.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. */ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock_drv.h> +#include <net/xdp.h> + +#include "e1000_hw.h" +#include "igb.h" + +static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present) +{ + int size = pool_present ? + sizeof(*ring->rx_buffer_info_zc) * ring->count : + sizeof(*ring->rx_buffer_info) * ring->count; + void *buff_info = vmalloc(size); + + if (!buff_info) + return -ENOMEM; + + if (pool_present) { + vfree(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + ring->rx_buffer_info_zc = buff_info; + } else { + vfree(ring->rx_buffer_info_zc); + ring->rx_buffer_info_zc = NULL; + ring->rx_buffer_info = buff_info; + } + + return 0; +} + +static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid) +{ + struct igb_ring *tx_ring = adapter->tx_ring[qid]; + struct igb_ring *rx_ring = adapter->rx_ring[qid]; + struct e1000_hw *hw = &adapter->hw; + + set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags); + + wr32(E1000_TXDCTL(tx_ring->reg_idx), 0); + wr32(E1000_RXDCTL(rx_ring->reg_idx), 0); + + synchronize_net(); + + /* Rx/Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + igb_clean_tx_ring(tx_ring); + igb_clean_rx_ring(rx_ring); + + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid) +{ + struct igb_ring *tx_ring = adapter->tx_ring[qid]; + struct igb_ring *rx_ring = adapter->rx_ring[qid]; + + igb_configure_tx_ring(adapter, tx_ring); + igb_configure_rx_ring(adapter, rx_ring); + + synchronize_net(); + + clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + if (rx_ring->xsk_pool) + igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool, + igb_desc_unused(rx_ring)); + else + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + /* Rx/Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); +} + +struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + int qid = ring->queue_index; + struct xsk_buff_pool *pool; + + pool = xsk_get_pool_from_qid(adapter->netdev, qid); + + if (!igb_xdp_is_enabled(adapter)) + return NULL; + + return (pool && pool->dev) ? 
pool : NULL; +} + +static int igb_xsk_pool_enable(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct net_device *netdev = adapter->netdev; + struct igb_ring *rx_ring; + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) + return -EINVAL; + + err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR); + if (err) + return err; + + rx_ring = adapter->rx_ring[qid]; + if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter); + if (if_running) + igb_txrx_ring_disable(adapter, qid); + + if (if_running) { + err = igb_realloc_rx_buffer_info(rx_ring, true); + if (!err) { + igb_txrx_ring_enable(adapter, qid); + /* Kick start the NAPI context so that receiving will start */ + err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); + } + + if (err) { + xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid) +{ + struct xsk_buff_pool *pool; + struct igb_ring *rx_ring; + bool if_running; + int err; + + pool = xsk_get_pool_from_qid(adapter->netdev, qid); + if (!pool) + return -EINVAL; + + rx_ring = adapter->rx_ring[qid]; + if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter); + if (if_running) + igb_txrx_ring_disable(adapter, qid); + + xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR); + + if (if_running) { + err = igb_realloc_rx_buffer_info(rx_ring, false); + if (err) + return err; + + igb_txrx_ring_enable(adapter, qid); + } + + return 0; +} + +int igb_xsk_pool_setup(struct igb_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + return pool ? igb_xsk_pool_enable(adapter, pool, qid) : + igb_xsk_pool_disable(adapter, qid); +} + +static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, + union e1000_adv_rx_desc *rx_desc, u16 count) +{ + dma_addr_t dma; + u16 buffs; + int i; + + /* nothing to do */ + if (!count) + return 0; + + buffs = xsk_buff_alloc_batch(pool, xdp, count); + for (i = 0; i < buffs; i++) { + dma = xsk_buff_xdp_get_dma(*xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.upper.length = 0; + + rx_desc++; + xdp++; + } + + return buffs; +} + +bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count) +{ + u32 nb_buffs_extra = 0, nb_buffs = 0; + union e1000_adv_rx_desc *rx_desc; + u16 ntu = rx_ring->next_to_use; + u16 total_count = count; + struct xdp_buff **xdp; + + rx_desc = IGB_RX_DESC(rx_ring, ntu); + xdp = &rx_ring->rx_buffer_info_zc[ntu]; + + if (ntu + count >= rx_ring->count) { + nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, + rx_ring->count - ntu); + if (nb_buffs_extra != rx_ring->count - ntu) { + ntu += nb_buffs_extra; + goto exit; + } + rx_desc = IGB_RX_DESC(rx_ring, 0); + xdp = rx_ring->rx_buffer_info_zc; + ntu = 0; + count -= nb_buffs_extra; + } + + nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count); + ntu += nb_buffs; + if (ntu == rx_ring->count) + ntu = 0; + + /* clear the length for the next_to_use descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, ntu); + rx_desc->wb.upper.length = 0; + +exit: + if (rx_ring->next_to_use != ntu) { + rx_ring->next_to_use = ntu; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(ntu, rx_ring->tail); + } + + return total_count == (nb_buffs + nb_buffs_extra); +} + +void igb_clean_rx_ring_zc(struct igb_ring *rx_ring) +{ + u16 ntc = rx_ring->next_to_clean; + u16 ntu = rx_ring->next_to_use; + + while (ntc != ntu) { + struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc]; + + xsk_buff_free(xdp); + ntc++; + if (ntc >= rx_ring->count) + ntc = 0; + } +} + +static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring, + struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool, + struct bpf_prog *xdp_prog) +{ + int err, result = IGB_XDP_PASS; + u32 act; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) + return IGB_XDP_REDIR; + + if (xsk_uses_need_wakeup(xsk_pool) && + err == -ENOBUFS) + result = IGB_XDP_EXIT; + else + result = IGB_XDP_CONSUMED; + goto out_failure; + } + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == IGB_XDP_CONSUMED) + goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } + + return result; +} + +int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, + struct xsk_buff_pool *xsk_pool, const int budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + struct igb_ring *rx_ring = q_vector->rx.ring; + u32 ntc = rx_ring->next_to_clean; + struct bpf_prog *xdp_prog; + unsigned int xdp_xmit = 0; + bool failure = false; + u16 entries_to_alloc; + struct sk_buff *skb; + + /* xdp_prog cannot be NULL in the ZC path */ + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + ktime_t timestamp = 0; + struct xdp_buff *xdp; + unsigned int size; + int xdp_res = 0; + + rx_desc = IGB_RX_DESC(rx_ring, ntc); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + xdp = rx_ring->rx_buffer_info_zc[ntc]; + xsk_buff_set_size(xdp, size); + xsk_buff_dma_sync_for_cpu(xdp); + + /* pull rx packet timestamp if available and valid */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + int ts_hdr_len; + + ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, + xdp->data, + &timestamp); + + xdp->data += ts_hdr_len; + xdp->data_meta += ts_hdr_len; + size -= ts_hdr_len; + } + + xdp_res = igb_run_xdp_zc(adapter, 
rx_ring, xdp, xsk_pool, + xdp_prog); + + if (xdp_res) { + if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == IGB_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == IGB_XDP_CONSUMED) { + xsk_buff_free(xdp); + } + + total_packets++; + total_bytes += size; + ntc++; + if (ntc == rx_ring->count) + ntc = 0; + continue; + } + + skb = igb_construct_skb_zc(rx_ring, xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + break; + } + + xsk_buff_free(xdp); + ntc++; + if (ntc == rx_ring->count) + ntc = 0; + + if (eth_skb_pad(skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* update budget accounting */ + total_packets++; + } + + rx_ring->next_to_clean = ntc; + + if (xdp_xmit) + igb_finalize_xdp(adapter, xdp_xmit); + + igb_update_rx_stats(q_vector, total_packets, total_bytes); + + entries_to_alloc = igb_desc_unused(rx_ring); + if (entries_to_alloc >= IGB_RX_BUFFER_WRITE) + failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool, + entries_to_alloc); + + if (xsk_uses_need_wakeup(xsk_pool)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(xsk_pool); + else + xsk_clear_rx_need_wakeup(xsk_pool); + + return (int)total_packets; + } + return failure ? budget : (int)total_packets; +} + +bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool) +{ + unsigned int budget = igb_desc_unused(tx_ring); + u32 cmd_type, olinfo_status, nb_pkts, i = 0; + struct xdp_desc *descs = xsk_pool->tx_descs; + union e1000_adv_tx_desc *tx_desc = NULL; + struct igb_tx_buffer *tx_buffer_info; + unsigned int total_bytes = 0; + dma_addr_t dma; + + if (!netif_carrier_ok(tx_ring->netdev)) + return true; + + if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)) + return true; + + nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget); + if (!nb_pkts) + return true; + + while (nb_pkts-- > 0) { + dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr); + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len); + + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + tx_buffer_info->bytecount = descs[i].len; + tx_buffer_info->type = IGB_TYPE_XSK; + tx_buffer_info->xdpf = NULL; + tx_buffer_info->gso_segs = 1; + tx_buffer_info->time_stamp = jiffies; + + tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT; + + /* FIXME: This sets the Report Status (RS) bit for every + * descriptor. One nice to have optimization would be to set it + * only for the last descriptor in the whole batch. See Intel + * ice driver for an example on how to do it. 
+ */ + cmd_type |= descs[i].len | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + total_bytes += descs[i].len; + + i++; + tx_ring->next_to_use++; + tx_buffer_info->next_to_watch = tx_desc; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes); + igb_xdp_ring_update_tail(tx_ring); + + return nb_pkts < budget; +} + +int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 eics = 0; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igb_xdp_is_enabled(adapter)) + return -EINVAL; + + if (qid >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[qid]; + + if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags)) + return -ENETDOWN; + + if (!READ_ONCE(ring->xsk_pool)) + return -EINVAL; + + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + /* Cause software interrupt */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + eics |= ring->q_vector->eims_value; + wr32(E1000_EICS, eics); + } else { + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } + } + + return 0; +} diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index eac0f966e0e4..b8111ad9a9a8 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -337,6 +337,8 @@ struct igc_adapter { struct igc_led_classdev *leds; }; +void igc_set_queue_napi(struct igc_adapter *adapter, int q_idx, + struct napi_struct *napi); void igc_up(struct igc_adapter *adapter); void igc_down(struct igc_adapter *adapter); int igc_open(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 9fae8bdec2a7..1613b562d17c 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw) u32 eecd = rd32(IGC_EECD); u16 size; + /* failed to read reg and got all F's */ + if (!(~eecd)) + return -ENXIO; + size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd); /* Added to a constant, "size" becomes the left-shift value @@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) /* NVM initialization */ ret_val = igc_init_nvm_params_base(hw); + if (ret_val) + goto out; switch (hw->mac.type) { case igc_i225: ret_val = igc_init_nvm_params_i225(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index d9d1a1a11daf..be8a49a86d09 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw); #define hw_dbg(format, arg...) 
\ netdev_dbg(igc_get_hw_dev(hw), format, ##arg) -s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); -s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); -void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); -void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); - #endif /* _IGC_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 27872bdea9bd..84307bb7313e 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring, return -ENOMEM; } + buffer->type = IGC_TX_BUFFER_TYPE_SKB; buffer->skb = skb; buffer->protocol = 0; buffer->bytecount = skb->len; @@ -2123,10 +2124,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring, union igc_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { struct net_device *netdev = rx_ring->netdev; @@ -2515,8 +2512,7 @@ out_failure: } } -static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, - struct xdp_buff *xdp) +static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp) { struct bpf_prog *prog; int res; @@ -2530,7 +2526,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, res = __igc_xdp_run_prog(adapter, prog, xdp); out: - return ERR_PTR(-res); + return res; } /* This function assumes __netif_tx_lock is held by the caller. */ @@ -2585,6 +2581,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = igc_desc_unused(rx_ring); int xdp_status = 0, rx_buffer_pgcnt; + int xdp_res = 0; while (likely(total_packets < budget)) { struct igc_xdp_buff ctx = { .rx_ts = NULL }; @@ -2630,12 +2627,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) xdp_buff_clear_frags_flag(&ctx.xdp); ctx.rx_desc = rx_desc; - skb = igc_xdp_run_prog(adapter, &ctx.xdp); + xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { switch (xdp_res) { case IGC_XDP_CONSUMED: rx_buffer->pagecnt_bias++; @@ -2657,7 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); @@ -2672,7 +2667,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) continue; /* verify the packet layout is correct */ - if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -2707,8 +2702,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, - struct xdp_buff *xdp) + struct igc_xdp_buff *ctx) { + struct xdp_buff *xdp = &ctx->xdp; unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; @@ -2727,27 +2723,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, __skb_pull(skb, metasize); } + if (ctx->rx_ts) { + skb_shinfo(skb)->tx_flags |= 
SKBTX_HW_TSTAMP_NETDEV; + skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; + } + return skb; } static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, union igc_adv_rx_desc *desc, - struct xdp_buff *xdp, - ktime_t timestamp) + struct igc_xdp_buff *ctx) { struct igc_ring *ring = q_vector->rx.ring; struct sk_buff *skb; - skb = igc_construct_skb_zc(ring, xdp); + skb = igc_construct_skb_zc(ring, ctx); if (!skb) { ring->rx_stats.alloc_failed++; set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); return; } - if (timestamp) - skb_hwtstamps(skb)->hwtstamp = timestamp; - if (igc_cleanup_headers(ring, desc, skb)) return; @@ -2783,7 +2780,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) union igc_adv_rx_desc *desc; struct igc_rx_buffer *bi; struct igc_xdp_buff *ctx; - ktime_t timestamp = 0; unsigned int size; int res; @@ -2813,6 +2809,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) */ bi->xdp->data_meta += IGC_TS_HDR_LEN; size -= IGC_TS_HDR_LEN; + } else { + ctx->rx_ts = NULL; } bi->xdp->data_end = bi->xdp->data + size; @@ -2821,7 +2819,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) res = __igc_xdp_run_prog(adapter, prog, bi->xdp); switch (res) { case IGC_XDP_PASS: - igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + igc_dispatch_skb_zc(q_vector, desc, ctx); fallthrough; case IGC_XDP_CONSUMED: xsk_buff_free(bi->xdp); @@ -4948,6 +4946,22 @@ static int igc_sw_init(struct igc_adapter *adapter) return 0; } +void igc_set_queue_napi(struct igc_adapter *adapter, int vector, + struct napi_struct *napi) +{ + struct igc_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->rx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->rx.ring->queue_index, + NETDEV_QUEUE_TYPE_RX, napi); + + if (q_vector->tx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->tx.ring->queue_index, + NETDEV_QUEUE_TYPE_TX, napi); +} + /** * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure @@ -4955,6 +4969,7 @@ static int igc_sw_init(struct igc_adapter *adapter) void igc_up(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int i = 0; /* hardware has been reset, we need to reload some things */ @@ -4962,8 +4977,11 @@ void igc_up(struct igc_adapter *adapter) clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, napi); + } if (adapter->msix_entries) igc_configure_msix(adapter); @@ -5192,6 +5210,7 @@ void igc_down(struct igc_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); + igc_set_queue_napi(adapter, i, NULL); napi_disable(&adapter->q_vector[i]->napi); } } @@ -5576,6 +5595,9 @@ static int igc_request_msix(struct igc_adapter *adapter) q_vector); if (err) goto err_free; + + netif_napi_set_irq(&q_vector->napi, + adapter->msix_entries[vector].vector); } igc_configure_msix(adapter); @@ -6018,6 +6040,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) struct igc_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; + struct napi_struct *napi; int err = 0; int i = 0; @@ -6053,8 +6076,11 @@ static int __igc_open(struct net_device 
*netdev, bool resuming) clear_bit(__IGC_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&adapter->q_vector[i]->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igc_set_queue_napi(adapter, i, napi); + } /* Clear any pending interrupts. */ rd32(IGC_ICR); @@ -6779,45 +6805,6 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_get_tstamp = igc_get_tstamp, }; -/* PCIe configuration access */ -void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - pci_read_config_word(adapter->pdev, reg, value); -} - -void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - pci_write_config_word(adapter->pdev, reg, *value); -} - -s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_read_word(adapter->pdev, reg, value); - - return IGC_SUCCESS; -} - -s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) -{ - struct igc_adapter *adapter = hw->back; - - if (!pci_is_pcie(adapter->pdev)) - return -IGC_ERR_CONFIG; - - pcie_capability_write_word(adapter->pdev, reg, *value); - - return IGC_SUCCESS; -} - u32 igc_rd32(struct igc_hw *hw, u32 reg) { struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); @@ -7338,7 +7325,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int igc_resume(struct device *dev) +static int __igc_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7381,7 +7368,11 @@ static int igc_resume(struct device *dev) wr32(IGC_WUS, ~0); if (netif_running(netdev)) { + if (!rpm) + rtnl_lock(); err = __igc_open(netdev, true); + if (!rpm) + rtnl_unlock(); if (!err) netif_device_attach(netdev); } @@ -7389,9 +7380,14 @@ static int igc_resume(struct device *dev) return err; } +static int igc_resume(struct device *dev) +{ + return __igc_resume(dev, false); +} + static int igc_runtime_resume(struct device *dev) { - return igc_resume(dev); + return __igc_resume(dev, true); } static int igc_suspend(struct device *dev) @@ -7436,14 +7432,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); netif_device_detach(netdev); - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; + } if (netif_running(netdev)) igc_down(adapter); pci_disable_device(pdev); + rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -7454,7 +7454,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igc_resume routine. + * resembles the first-half of the __igc_resume routine. **/ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) { @@ -7493,7 +7493,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the igc_resume routine. 
+ * second-half of the __igc_resume routine. */ static void igc_io_resume(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c index 58f81aba0144..efd121c03967 100644 --- a/drivers/net/ethernet/intel/igc/igc_nvm.c +++ b/drivers/net/ethernet/intel/igc/igc_nvm.c @@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) } /** - * igc_acquire_nvm - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -IGC_ERR_NVM (-1). - */ -s32 igc_acquire_nvm(struct igc_hw *hw) -{ - s32 timeout = IGC_NVM_GRANT_ATTEMPTS; - u32 eecd = rd32(IGC_EECD); - s32 ret_val = 0; - - wr32(IGC_EECD, eecd | IGC_EECD_REQ); - eecd = rd32(IGC_EECD); - - while (timeout) { - if (eecd & IGC_EECD_GNT) - break; - udelay(5); - eecd = rd32(IGC_EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~IGC_EECD_REQ; - wr32(IGC_EECD, eecd); - hw_dbg("Could not acquire NVM grant\n"); - ret_val = -IGC_ERR_NVM; - } - - return ret_val; -} - -/** - * igc_release_nvm - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. - */ -void igc_release_nvm(struct igc_hw *hw) -{ - u32 eecd; - - eecd = rd32(IGC_EECD); - eecd &= ~IGC_EECD_REQ; - wr32(IGC_EECD, eecd); -} - -/** * igc_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h index f9fc2e9cfb03..ab78d0c64547 100644 --- a/drivers/net/ethernet/intel/igc/igc_nvm.h +++ b/drivers/net/ethernet/intel/igc/igc_nvm.h @@ -4,8 +4,6 @@ #ifndef _IGC_NVM_H_ #define _IGC_NVM_H_ -s32 igc_acquire_nvm(struct igc_hw *hw); -void igc_release_nvm(struct igc_hw *hw); s32 igc_read_mac_addr(struct igc_hw *hw); s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); s32 igc_validate_nvm_checksum(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c index e27af72aada8..13bbd3346e01 100644 --- a/drivers/net/ethernet/intel/igc/igc_xdp.c +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -13,6 +13,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, struct net_device *dev = adapter->netdev; bool if_running = netif_running(dev); struct bpf_prog *old_prog; + bool need_update; if (dev->mtu > ETH_DATA_LEN) { /* For now, the driver doesn't support XDP functionality with @@ -22,7 +23,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, return -EOPNOTSUPP; } - if (if_running) + need_update = !!adapter->xdp_prog != !!prog; + if (if_running && need_update) igc_close(dev); old_prog = xchg(&adapter->xdp_prog, prog); @@ -34,7 +36,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, else xdp_features_clear_redirect_target(dev); - if (if_running) + if (if_running && need_update) igc_open(dev); return 0; @@ -84,6 +86,7 @@ static int igc_xdp_enable_pool(struct igc_adapter *adapter, napi_disable(napi); } + igc_set_queue_napi(adapter, queue_id, NULL); set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); @@ -133,6 +136,7 @@ static int igc_xdp_disable_pool(struct igc_adapter 
*adapter, u16 queue_id) xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + igc_set_queue_napi(adapter, queue_id, napi); if (needs_reset) { napi_enable(napi); diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 965e5ce1b326..b456d102655a 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 1999 - 2018 Intel Corporation. +# Copyright(c) 1999 - 2024 Intel Corporation. # # Makefile for the Intel(R) 10GbE PCI Express ethernet driver # @@ -9,7 +9,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o + ixgbe_xsk.o ixgbe_e610.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 559b443c409f..e6a380d4929b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBE_H_ #define _IXGBE_H_ @@ -20,6 +20,7 @@ #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_dcb.h" +#include "ixgbe_e610.h" #if IS_ENABLED(CONFIG_FCOE) #define IXGBE_FCOE #include "ixgbe_fcoe.h" @@ -173,6 +174,7 @@ enum ixgbe_tx_flags { #define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) #define IXGBE_82599_VF_DEVICE_ID 0x10ED #define IXGBE_X540_VF_DEVICE_ID 0x1515 +#define IXGBE_E610_VF_DEVICE_ID 0x57AD #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ { \ @@ -654,6 +656,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) #define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) +#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) @@ -661,6 +664,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) #define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) #define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19) +#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20) +#define IXGBE_FLAG2_NO_MEDIA BIT(21) +#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22) /* Tx fast path data */ int num_tx_queues; @@ -793,6 +799,7 @@ struct ixgbe_adapter { u32 vferr_refcount; struct ixgbe_mac_addr *mac_table; struct kobject *info_kobj; + u16 lse_mask; #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff *ixgbe_hwmon_buff; #endif /* CONFIG_IXGBE_HWMON */ @@ -849,6 +856,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: return IXGBE_MAX_RSS_INDICES_X550; default: return 0; @@ -874,6 +882,7 @@ enum ixgbe_state_t { __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, __IXGBE_RESET_REQUESTED, + __IXGBE_PHY_INIT_COMPLETE, }; struct ixgbe_cb { @@ -896,6 +905,7 @@ enum ixgbe_boards { board_x550em_x_fw, board_x550em_a, board_x550em_a_fw, + board_e610, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -906,6 +916,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct 
ixgbe_info ixgbe_x550em_x_fw_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; +extern const struct ixgbe_info ixgbe_e610_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index cdaf087b4e85..964988b4d58b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 3be1bfb16498..7beaf6ea57f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_E610_SFP: supported = false; break; default: @@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_E610_10G_T: + case IXGBE_DEV_ID_E610_2_5G_T: supported = true; break; default: @@ -469,9 +472,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } } - if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_e610) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); + } + + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); @@ -660,7 +668,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) hw->bus.type = ixgbe_bus_type_pci_express; /* Get the negotiated link width and speed from PCI config space */ - link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); + if (hw->mac.type == ixgbe_mac_e610) + link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610); + else + link_status = ixgbe_read_pci_cfg_word(hw, + IXGBE_PCI_LINK_STATUS); hw->bus.width = ixgbe_convert_bus_width(link_status); hw->bus.speed = ixgbe_convert_bus_speed(link_status); @@ -2918,6 +2930,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; + case ixgbe_mac_e610: + pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; default: return 1; } @@ -3366,7 +3382,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case 
IXGBE_LINKS_SPEED_100_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && + if ((hw->mac.type >= ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_e610) && (links_reg & IXGBE_LINKS_SPEED_NON_STD)) *speed = IXGBE_LINK_SPEED_5GB_FULL; else diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f2709b10c2e5..19d6b6fa8fb3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "ixgbe.h" #include <linux/dcbnl.h> @@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: + case ixgbe_mac_e610: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c new file mode 100644 index 000000000000..683c668672d6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c @@ -0,0 +1,2658 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Intel Corporation. */ + +#include "ixgbe_common.h" +#include "ixgbe_e610.h" +#include "ixgbe_x550.h" +#include "ixgbe_type.h" +#include "ixgbe_x540.h" +#include "ixgbe_mbx.h" +#include "ixgbe_phy.h" + +/** + * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should + * be resent + * @opcode: ACI opcode + * + * Check if ACI command should be sent again depending on the provided opcode. + * It may happen when CSR is busy during link state changes. + * + * Return: true if the sending command routine should be repeated, + * otherwise false. + */ +static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode) +{ + switch (opcode) { + case ixgbe_aci_opc_disable_rxen: + case ixgbe_aci_opc_get_phy_caps: + case ixgbe_aci_opc_get_link_status: + case ixgbe_aci_opc_get_link_topo: + return true; + } + + return false; +} + +/** + * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin + * Command Interface + * @hw: pointer to the HW struct + * @desc: descriptor describing the command + * @buf: buffer to use for indirect commands (NULL for direct commands) + * @buf_size: size of buffer for indirect commands (0 for direct commands) + * + * Admin Command is sent using CSR by setting descriptor and buffer in specific + * registers. + * + * Return: the exit code of the operation. + * * - 0 - success. + * * - -EIO - CSR mechanism is not enabled. + * * - -EBUSY - CSR mechanism is busy. + * * - -EINVAL - buf_size is too big or + * invalid argument buf or buf_size. + * * - -ETIME - Admin Command X command timeout. + * * - -EIO - Admin Command X invalid state of HICR register or + * Admin Command failed because of bad opcode was returned or + * Admin Command failed with error Y. 
+ */ +static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, + struct ixgbe_aci_desc *desc, + void *buf, u16 buf_size) +{ + u16 opcode, buf_tail_size = buf_size % 4; + u32 *raw_desc = (u32 *)desc; + u32 hicr, i, buf_tail = 0; + bool valid_buf = false; + + hw->aci.last_status = IXGBE_ACI_RC_OK; + + /* It's necessary to check if mechanism is enabled */ + hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR); + + if (!(hicr & IXGBE_PF_HICR_EN)) + return -EIO; + + if (hicr & IXGBE_PF_HICR_C) { + hw->aci.last_status = IXGBE_ACI_RC_EBUSY; + return -EBUSY; + } + + opcode = le16_to_cpu(desc->opcode); + + if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) + return -EINVAL; + + if (buf) + desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF); + + if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) { + if ((buf && !buf_size) || + (!buf && buf_size)) + return -EINVAL; + if (buf && buf_size) + valid_buf = true; + } + + if (valid_buf) { + if (buf_tail_size) + memcpy(&buf_tail, buf + buf_size - buf_tail_size, + buf_tail_size); + + if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF) + desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB); + + desc->datalen = cpu_to_le16(buf_size); + + if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) { + for (i = 0; i < buf_size / 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]); + if (buf_tail_size) + IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail); + } + } + + /* Descriptor is written to specific registers */ + for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) + IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]); + + /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and + * PF_HICR_EV + */ + hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) & + ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV); + IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr); + +#define MAX_SLEEP_RESP_US 1000 +#define MAX_TMOUT_RESP_SYNC_US 100000000 + + /* Wait for sync Admin Command response */ + read_poll_timeout(IXGBE_READ_REG, hicr, + (hicr & IXGBE_PF_HICR_SV) || + !(hicr & IXGBE_PF_HICR_C), + MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw, + IXGBE_PF_HICR); + +#define MAX_TMOUT_RESP_ASYNC_US 150000000 + + /* Wait for async Admin Command response */ + read_poll_timeout(IXGBE_READ_REG, hicr, + (hicr & IXGBE_PF_HICR_EV) || + !(hicr & IXGBE_PF_HICR_C), + MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw, + IXGBE_PF_HICR); + + /* Read sync Admin Command response */ + if ((hicr & IXGBE_PF_HICR_SV)) { + for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { + raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i)); + raw_desc[i] = raw_desc[i]; + } + } + + /* Read async Admin Command response */ + if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) { + for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { + raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i)); + raw_desc[i] = raw_desc[i]; + } + } + + /* Handle timeout and invalid state of HICR register */ + if (hicr & IXGBE_PF_HICR_C) + return -ETIME; + + if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV)) + return -EIO; + + /* For every command other than 0x0014 treat opcode mismatch + * as an error. Response to 0x0014 command read from HIDA_2 + * is a descriptor of an event which is expected to contain + * different opcode than the command. 
+ */ + if (desc->opcode != cpu_to_le16(opcode) && + opcode != ixgbe_aci_opc_get_fw_event) + return -EIO; + + if (desc->retval) { + hw->aci.last_status = (enum ixgbe_aci_err) + le16_to_cpu(desc->retval); + return -EIO; + } + + /* Write a response values to a buf */ + if (valid_buf) { + for (i = 0; i < buf_size / 4; i++) + ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); + if (buf_tail_size) { + buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); + memcpy(buf + buf_size - buf_tail_size, &buf_tail, + buf_tail_size); + } + } + + return 0; +} + +/** + * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface + * @hw: pointer to the HW struct + * @desc: descriptor describing the command + * @buf: buffer to use for indirect commands (NULL for direct commands) + * @buf_size: size of buffer for indirect commands (0 for direct commands) + * + * Helper function to send FW Admin Commands to the FW Admin Command Interface. + * + * Retry sending the FW Admin Command multiple times to the FW ACI + * if the EBUSY Admin Command error is returned. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc, + void *buf, u16 buf_size) +{ + u16 opcode = le16_to_cpu(desc->opcode); + struct ixgbe_aci_desc desc_cpy; + enum ixgbe_aci_err last_status; + u8 idx = 0, *buf_cpy = NULL; + bool is_cmd_for_retry; + unsigned long timeout; + int err; + + is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode); + if (is_cmd_for_retry) { + if (buf) { + buf_cpy = kmalloc(buf_size, GFP_KERNEL); + if (!buf_cpy) + return -ENOMEM; + *buf_cpy = *(u8 *)buf; + } + desc_cpy = *desc; + } + + timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS); + do { + mutex_lock(&hw->aci.lock); + err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size); + last_status = hw->aci.last_status; + mutex_unlock(&hw->aci.lock); + + if (!is_cmd_for_retry || !err || + last_status != IXGBE_ACI_RC_EBUSY) + break; + + if (buf) + memcpy(buf, buf_cpy, buf_size); + *desc = desc_cpy; + + msleep(IXGBE_ACI_SEND_DELAY_TIME_MS); + } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE && + time_before(jiffies, timeout)); + + kfree(buf_cpy); + + return err; +} + +/** + * ixgbe_aci_check_event_pending - check if there are any pending events + * @hw: pointer to the HW struct + * + * Determine if there are any pending events. + * + * Return: true if there are any currently pending events + * otherwise false. + */ +bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw) +{ + u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0; + u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS); + + return (fwsts & ep_bit_mask) ? true : false; +} + +/** + * ixgbe_aci_get_event - get an event from ACI + * @hw: pointer to the HW struct + * @e: event information structure + * @pending: optional flag signaling that there are more pending events + * + * Obtain an event from ACI and return its content + * through 'e' using ACI command (0x0014). + * Provide information if there are more events + * to retrieve through 'pending'. + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, + bool *pending) +{ + struct ixgbe_aci_desc desc; + int err; + + if (!e || (!e->msg_buf && e->buf_len)) + return -EINVAL; + + mutex_lock(&hw->aci.lock); + + /* Check if there are any events pending */ + if (!ixgbe_aci_check_event_pending(hw)) { + err = -ENOENT; + goto aci_get_event_exit; + } + + /* Obtain pending event */ + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event); + err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len); + if (err) + goto aci_get_event_exit; + + /* Returned 0x0014 opcode indicates that no event was obtained */ + if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) { + err = -ENOENT; + goto aci_get_event_exit; + } + + /* Determine size of event data */ + e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len); + /* Write event descriptor to event info structure */ + memcpy(&e->desc, &desc, sizeof(e->desc)); + + /* Check if there are any further events pending */ + if (pending) + *pending = ixgbe_aci_check_event_pending(hw); + +aci_get_event_exit: + mutex_unlock(&hw->aci.lock); + + return err; +} + +/** + * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values. + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Helper function to fill the descriptor desc with default values + * and the provided opcode. + */ +void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode) +{ + /* Zero out the desc. */ + memset(desc, 0, sizeof(*desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI); +} + +/** + * ixgbe_aci_req_res - request a common resource + * @hw: pointer to the HW struct + * @res: resource ID + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * + * Requests a common resource using the ACI command (0x0008). + * Specifies the maximum time the driver may hold the resource. + * If the requested resource is currently occupied by some other driver, + * a busy return value is returned and the timeout field value indicates the + * maximum time the current owner has to free it. + * + * Return: the exit code of the operation. + */ +static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, + enum ixgbe_aci_res_access_type access, + u8 sdp_number, u32 *timeout) +{ + struct ixgbe_aci_cmd_req_res *cmd_resp; + struct ixgbe_aci_desc desc; + int err; + + cmd_resp = &desc.params.res_owner; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res); + + cmd_resp->res_id = cpu_to_le16(res); + cmd_resp->access_type = cpu_to_le16(access); + cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; + + err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. + */ + if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY) + *timeout = le32_to_cpu(cmd_resp->timeout); + + return err; +} + +/** + * ixgbe_aci_release_res - release a common resource using ACI + * @hw: pointer to the HW struct + * @res: resource ID + * @sdp_number: resource number + * + * Release a common resource using ACI command (0x0009). + * + * Return: the exit code of the operation. 
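
A minimal sketch of how a caller might drain the firmware event queue with ixgbe_aci_get_event(); this is illustrative only and not part of this diff, and handle_fw_event() is a hypothetical consumer of the returned descriptor and buffer:

	static void example_drain_aci_events(struct ixgbe_hw *hw)
	{
		struct ixgbe_aci_event e = { .buf_len = IXGBE_ACI_MAX_BUFFER_SIZE };
		bool pending = true;

		e.msg_buf = kzalloc(e.buf_len, GFP_KERNEL);
		if (!e.msg_buf)
			return;

		/* -ENOENT means nothing is queued; 'pending' reports further events. */
		while (pending && !ixgbe_aci_get_event(hw, &e, &pending))
			handle_fw_event(&e.desc, e.msg_buf, e.msg_len);

		kfree(e.msg_buf);
	}
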
+ */ +static int ixgbe_aci_release_res(struct ixgbe_hw *hw, + enum ixgbe_aci_res_ids res, u8 sdp_number) +{ + struct ixgbe_aci_cmd_req_res *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.res_owner; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res); + + cmd->res_id = cpu_to_le16(res); + cmd->res_number = cpu_to_le32(sdp_number); + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_acquire_res - acquire the ownership of a resource + * @hw: pointer to the HW structure + * @res: resource ID + * @access: access type (read or write) + * @timeout: timeout in milliseconds + * + * Make an attempt to acquire the ownership of a resource using + * the ixgbe_aci_req_res to utilize ACI. + * In case if some other driver has previously acquired the resource and + * performed any necessary updates, the -EALREADY is returned, + * and the caller does not obtain the resource and has no further work to do. + * If needed, the function will poll until the current lock owner timeouts. + * + * Return: the exit code of the operation. + */ +int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, + enum ixgbe_aci_res_access_type access, u32 timeout) +{ +#define IXGBE_RES_POLLING_DELAY_MS 10 + u32 delay = IXGBE_RES_POLLING_DELAY_MS; + u32 res_timeout = timeout; + u32 retry_timeout; + int err; + + err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); + + /* A return code of -EALREADY means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. + */ + if (err == -EALREADY) + return err; + + /* If necessary, poll until the current lock owner timeouts. + * Set retry_timeout to the timeout value reported by the FW in the + * response to the "Request Resource Ownership" (0x0008) Admin Command + * as it indicates the maximum time the current owner of the resource + * is allowed to hold it. + */ + retry_timeout = res_timeout; + while (err && retry_timeout && res_timeout) { + msleep(delay); + retry_timeout = (retry_timeout > delay) ? + retry_timeout - delay : 0; + err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); + + /* Success - lock acquired. + * -EALREADY - lock free, no work to do. + */ + if (!err || err == -EALREADY) + break; + } + + return err; +} + +/** + * ixgbe_release_res - release a common resource + * @hw: pointer to the HW structure + * @res: resource ID + * + * Release a common resource using ixgbe_aci_release_res. + */ +void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res) +{ + u32 total_delay = 0; + int err; + + err = ixgbe_aci_release_res(hw, res, 0); + + /* There are some rare cases when trying to release the resource + * results in an admin command timeout, so handle them correctly. + */ + while (err == -ETIME && + total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) { + usleep_range(1000, 1500); + err = ixgbe_aci_release_res(hw, res, 0); + total_delay++; + } +} + +/** + * ixgbe_parse_e610_caps - Parse common device/function capabilities + * @hw: pointer to the HW struct + * @caps: pointer to common capabilities structure + * @elem: the capability element to parse + * @prefix: message prefix for tracing capabilities + * + * Given a capability element, extract relevant details into the common + * capability structure. + * + * Return: true if the capability matches one of the common capability ids, + * false otherwise. 
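
The two helpers above give the usual acquire/use/release pattern for shared firmware-owned resources; a caller sketch follows (illustrative only, not from this diff: IXGBE_ACI_RES_ACCESS_WRITE stands in for whichever ixgbe_aci_res_access_type value the caller needs, while IXGBE_NVM_RES_ID and IXGBE_NVM_TIMEOUT are the identifiers used by the NVM wrappers later in this file):

	static int example_update_shared_resource(struct ixgbe_hw *hw)
	{
		int err;

		err = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID,
					IXGBE_ACI_RES_ACCESS_WRITE,
					IXGBE_NVM_TIMEOUT);
		if (err == -EALREADY)
			return 0;	/* another function already did the work */
		if (err)
			return err;

		/* ... operate on the resource while ownership is held ... */

		ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
		return 0;
	}
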
+ */ +static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_caps *caps, + struct ixgbe_aci_cmd_list_caps_elem *elem, + const char *prefix) +{ + u32 logical_id = le32_to_cpu(elem->logical_id); + u32 phys_id = le32_to_cpu(elem->phys_id); + u32 number = le32_to_cpu(elem->number); + u16 cap = le16_to_cpu(elem->cap); + + switch (cap) { + case IXGBE_ACI_CAPS_VALID_FUNCTIONS: + caps->valid_functions = number; + break; + case IXGBE_ACI_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + break; + case IXGBE_ACI_CAPS_VMDQ: + caps->vmdq = (number == 1); + break; + case IXGBE_ACI_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + break; + case IXGBE_ACI_CAPS_RSS: + caps->rss_table_size = number; + caps->rss_table_entry_width = logical_id; + break; + case IXGBE_ACI_CAPS_RXQS: + caps->num_rxq = number; + caps->rxq_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_TXQS: + caps->num_txq = number; + caps->txq_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_MSIX: + caps->num_msix_vectors = number; + caps->msix_vector_first_id = phys_id; + break; + case IXGBE_ACI_CAPS_NVM_VER: + break; + case IXGBE_ACI_CAPS_MAX_MTU: + caps->max_mtu = number; + break; + case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE: + caps->pcie_reset_avoidance = (number > 0); + break; + case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT: + caps->reset_restrict_support = (number == 1); + break; + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2: + case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3: + { + u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0; + + caps->ext_topo_dev_img_ver_high[index] = number; + caps->ext_topo_dev_img_ver_low[index] = logical_id; + caps->ext_topo_dev_img_part_num[index] = + FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id); + caps->ext_topo_dev_img_load_en[index] = + (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; + caps->ext_topo_dev_img_prog_en[index] = + (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; + break; + } + default: + /* Not one of the recognized common capabilities */ + return false; + } + + return true; +} + +/** + * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities. + */ +static void +ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_funcs = hweight32(le32_to_cpu(cap->number)); +} + +/** + * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VF for device capabilities. + */ +static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_vfs_exposed = le32_to_cpu(cap->number); +} + +/** + * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_VSI for device capabilities. 
+ */ +static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number); +} + +/** + * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse IXGBE_ACI_CAPS_FD for device capabilities. + */ +static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + dev_p->num_flow_director_fltr = le32_to_cpu(cap->number); +} + +/** + * ixgbe_parse_dev_caps - Parse device capabilities + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @buf: buffer containing the device capability records + * @cap_count: the number of capabilities + * + * Helper device to parse device (0x000B) capabilities list. For + * capabilities shared between device and function, this relies on + * ixgbe_parse_e610_caps. + * + * Loop through the list of provided capabilities and extract the relevant + * data into the device capabilities structured. + */ +static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_p, + void *buf, u32 cap_count) +{ + struct ixgbe_aci_cmd_list_caps_elem *cap_resp; + u32 i; + + cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; + + memset(dev_p, 0, sizeof(*dev_p)); + + for (i = 0; i < cap_count; i++) { + u16 cap = le16_to_cpu(cap_resp[i].cap); + + ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i], + "dev caps"); + + switch (cap) { + case IXGBE_ACI_CAPS_VALID_FUNCTIONS: + ixgbe_parse_valid_functions_cap(hw, dev_p, + &cap_resp[i]); + break; + case IXGBE_ACI_CAPS_VF: + ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); + break; + case IXGBE_ACI_CAPS_VSI: + ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); + break; + case IXGBE_ACI_CAPS_FD: + ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); + break; + default: + /* Don't list common capabilities as unknown */ + break; + } + } +} + +/** + * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @cap: pointer to the capability element to parse + * + * Extract function capabilities for IXGBE_ACI_CAPS_VF. + */ +static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + func_p->num_allocd_vfs = le32_to_cpu(cap->number); + func_p->vf_base_id = le32_to_cpu(cap->logical_id); +} + +/** + * ixgbe_get_num_per_func - determine number of resources per PF + * @hw: pointer to the HW structure + * @max: value to be evenly split between each PF + * + * Determine the number of valid functions by going through the bitmap returned + * from parsing capabilities and use this to calculate the number of resources + * per PF based on the max value passed in. + * + * Return: the number of resources per PF or 0, if no PH are available. + */ +static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max) +{ +#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0) + u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & + IXGBE_CAPS_VALID_FUNCS_M); + + return funcs ? 
(max / funcs) : 0; +} + +/** + * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @cap: pointer to the capability element to parse + * + * Extract function capabilities for IXGBE_ACI_CAPS_VSI. + */ +static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_p, + struct ixgbe_aci_cmd_list_caps_elem *cap) +{ + func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI); +} + +/** + * ixgbe_parse_func_caps - Parse function capabilities + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @buf: buffer containing the function capability records + * @cap_count: the number of capabilities + * + * Helper function to parse function (0x000A) capabilities list. For + * capabilities shared between device and function, this relies on + * ixgbe_parse_e610_caps. + * + * Loop through the list of provided capabilities and extract the relevant + * data into the function capabilities structured. + */ +static void ixgbe_parse_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_p, + void *buf, u32 cap_count) +{ + struct ixgbe_aci_cmd_list_caps_elem *cap_resp; + u32 i; + + cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; + + memset(func_p, 0, sizeof(*func_p)); + + for (i = 0; i < cap_count; i++) { + u16 cap = le16_to_cpu(cap_resp[i].cap); + + ixgbe_parse_e610_caps(hw, &func_p->common_cap, + &cap_resp[i], "func caps"); + + switch (cap) { + case IXGBE_ACI_CAPS_VF: + ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]); + break; + case IXGBE_ACI_CAPS_VSI: + ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); + break; + default: + /* Don't list common capabilities as unknown */ + break; + } + } +} + +/** + * ixgbe_aci_list_caps - query function/device capabilities + * @hw: pointer to the HW struct + * @buf: a buffer to hold the capabilities + * @buf_size: size of the buffer + * @cap_count: if not NULL, set to the number of capabilities reported + * @opc: capabilities type to discover, device or function + * + * Get the function (0x000A) or device (0x000B) capabilities description from + * firmware and store it in the buffer. + * + * If the cap_count pointer is not NULL, then it is set to the number of + * capabilities firmware will report. Note that if the buffer size is too + * small, it is possible the command will return -ENOMEM. The + * cap_count will still be updated in this case. It is recommended that the + * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible + * buffer that firmware could return) to avoid this. + * + * Return: the exit code of the operation. + * Exit code of -ENOMEM means the buffer size is too small. 
+ */ +int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size, + u32 *cap_count, enum ixgbe_aci_opc opc) +{ + struct ixgbe_aci_cmd_list_caps *cmd; + struct ixgbe_aci_desc desc; + int err; + + cmd = &desc.params.get_cap; + + if (opc != ixgbe_aci_opc_list_func_caps && + opc != ixgbe_aci_opc_list_dev_caps) + return -EINVAL; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, opc); + err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size); + + if (cap_count) + *cap_count = le32_to_cpu(cmd->count); + + return err; +} + +/** + * ixgbe_discover_dev_caps - Read and extract device capabilities + * @hw: pointer to the hardware structure + * @dev_caps: pointer to device capabilities structure + * + * Read the device capabilities and extract them into the dev_caps structure + * for later use. + * + * Return: the exit code of the operation. + */ +int ixgbe_discover_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_caps) +{ + u32 cap_count; + u8 *cbuf; + int err; + + cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!cbuf) + return -ENOMEM; + + /* Although the driver doesn't know the number of capabilities the + * device will return, we can simply send a 4KB buffer, the maximum + * possible size that firmware can return. + */ + cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / + sizeof(struct ixgbe_aci_cmd_list_caps_elem); + + err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, + &cap_count, + ixgbe_aci_opc_list_dev_caps); + if (!err) + ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count); + + kfree(cbuf); + + return 0; +} + +/** + * ixgbe_discover_func_caps - Read and extract function capabilities + * @hw: pointer to the hardware structure + * @func_caps: pointer to function capabilities structure + * + * Read the function capabilities and extract them into the func_caps structure + * for later use. + * + * Return: the exit code of the operation. + */ +int ixgbe_discover_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_caps) +{ + u32 cap_count; + u8 *cbuf; + int err; + + cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!cbuf) + return -ENOMEM; + + /* Although the driver doesn't know the number of capabilities the + * device will return, we can simply send a 4KB buffer, the maximum + * possible size that firmware can return. + */ + cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / + sizeof(struct ixgbe_aci_cmd_list_caps_elem); + + err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, + &cap_count, + ixgbe_aci_opc_list_func_caps); + if (!err) + ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count); + + kfree(cbuf); + + return 0; +} + +/** + * ixgbe_get_caps - get info about the HW + * @hw: pointer to the hardware structure + * + * Retrieve both device and function capabilities. + * + * Return: the exit code of the operation. + */ +int ixgbe_get_caps(struct ixgbe_hw *hw) +{ + int err; + + err = ixgbe_discover_dev_caps(hw, &hw->dev_caps); + if (err) + return err; + + return ixgbe_discover_func_caps(hw, &hw->func_caps); +} + +/** + * ixgbe_aci_disable_rxen - disable RX + * @hw: pointer to the HW struct + * + * Request a safe disable of Receive Enable using ACI command (0x000C). + * + * Return: the exit code of the operation. 
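
Once ixgbe_get_caps() has run, the parsed limits live in hw->dev_caps and hw->func_caps; a short sketch of an init path consuming them follows (illustrative only, the vector-sizing policy is hypothetical):

	static int example_size_vectors(struct ixgbe_hw *hw, u32 *num_vectors)
	{
		int err = ixgbe_get_caps(hw);

		if (err)
			return err;

		/* Never request more MSI-X vectors than the function reports. */
		*num_vectors = min_t(u32, num_online_cpus() + 1,
				     hw->func_caps.common_cap.num_msix_vectors);
		return 0;
	}
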
+ */ +int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_disable_rxen *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.disable_rxen; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen); + + cmd->lport_num = hw->bus.func; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_aci_get_phy_caps - returns PHY capabilities + * @hw: pointer to the HW struct + * @qual_mods: report qualified modules + * @report_mode: report mode capabilities + * @pcaps: structure for PHY capabilities to be filled + * + * Returns the various PHY capabilities supported on the Port + * using ACI command (0x0600). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps) +{ + struct ixgbe_aci_cmd_get_phy_caps *cmd; + u16 pcaps_size = sizeof(*pcaps); + struct ixgbe_aci_desc desc; + int err; + + cmd = &desc.params.get_phy; + + if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M)) + return -EINVAL; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps); + + if (qual_mods) + cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM); + + cmd->param0 |= cpu_to_le16(report_mode); + err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size); + if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) { + hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); + hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); + memcpy(hw->link.link_info.module_type, &pcaps->module_type, + sizeof(hw->link.link_info.module_type)); + } + + return err; +} + +/** + * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @caps: PHY ability structure to copy data from + * @cfg: PHY configuration structure to copy data to + * + * Helper function to copy data from PHY capabilities data structure + * to PHY configuration data structure + */ +void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) +{ + if (!caps || !cfg) + return; + + memset(cfg, 0, sizeof(*cfg)); + cfg->phy_type_low = caps->phy_type_low; + cfg->phy_type_high = caps->phy_type_high; + cfg->caps = caps->caps; + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; + cfg->eee_cap = caps->eee_cap; + cfg->eeer_value = caps->eeer_value; + cfg->link_fec_opt = caps->link_fec_options; + cfg->module_compliance_enforcement = + caps->module_compliance_enforcement; +} + +/** + * ixgbe_aci_set_phy_cfg - set PHY configuration + * @hw: pointer to the HW struct + * @cfg: structure with PHY configuration data to be set + * + * Set the various PHY configuration parameters supported on the Port + * using ACI command (0x0601). + * One or more of the Set PHY config parameters may be ignored in an MFP + * mode as the PF may not have the privilege to set some of the PHY Config + * parameters. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) +{ + struct ixgbe_aci_desc desc; + int err; + + if (!cfg) + return -EINVAL; + + /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ + cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg); + desc.params.set_phy.lport_num = hw->bus.func; + desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); + + err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg)); + if (!err) + hw->phy.curr_user_phy_cfg = *cfg; + + return err; +} + +/** + * ixgbe_aci_set_link_restart_an - set up link and restart AN + * @hw: pointer to the HW struct + * @ena_link: if true: enable link, if false: disable link + * + * Function sets up the link and restarts the Auto-Negotiation over the link. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link) +{ + struct ixgbe_aci_cmd_restart_an *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.restart_an; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an); + + cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART; + cmd->lport_num = hw->bus.func; + if (ena_link) + cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE; + else + cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_is_media_cage_present - check if media cage is present + * @hw: pointer to the HW struct + * + * Identify presence of media cage using the ACI command (0x06E0). + * + * Return: true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_link_topo *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.get_link_topo; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo); + + cmd->addr.topo_params.node_type_ctx = + FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M, + IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT); + + /* Set node type. */ + cmd->addr.topo_params.node_type_ctx |= + FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M, + IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE); + + /* Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present then + * connection type is backplane or BASE-T. + */ + return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL); +} + +/** + * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type + * @hw: pointer to the HW struct + * + * Try to identify the media type based on the phy type. + * If more than one media type, the ixgbe_media_type_unknown is returned. + * First, phy_type_low is checked, then phy_type_high. + * If none are identified, the ixgbe_media_type_unknown is returned + * + * Return: type of a media based on phy type in form of enum. + */ +static enum ixgbe_media_type +ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw) +{ + struct ixgbe_link_status *hw_link_info; + + if (!hw) + return ixgbe_media_type_unknown; + + hw_link_info = &hw->link.link_info; + if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) + /* If more than one media type is selected, report unknown */ + return ixgbe_media_type_unknown; + + if (hw_link_info->phy_type_low) { + /* 1G SGMII is a special case where some DA cable PHYs + * may show this as an option when it really shouldn't + * be since SGMII is meant to be between a MAC and a PHY + * in a backplane. 
Try to detect this case and handle it + */ + if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII && + (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] == + IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || + hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] == + IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) + return ixgbe_media_type_da; + + switch (hw_link_info->phy_type_low) { + case IXGBE_PHY_TYPE_LOW_1000BASE_SX: + case IXGBE_PHY_TYPE_LOW_1000BASE_LX: + case IXGBE_PHY_TYPE_LOW_10GBASE_SR: + case IXGBE_PHY_TYPE_LOW_10GBASE_LR: + case IXGBE_PHY_TYPE_LOW_25GBASE_SR: + case IXGBE_PHY_TYPE_LOW_25GBASE_LR: + return ixgbe_media_type_fiber; + case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + return ixgbe_media_type_fiber; + case IXGBE_PHY_TYPE_LOW_100BASE_TX: + case IXGBE_PHY_TYPE_LOW_1000BASE_T: + case IXGBE_PHY_TYPE_LOW_2500BASE_T: + case IXGBE_PHY_TYPE_LOW_5GBASE_T: + case IXGBE_PHY_TYPE_LOW_10GBASE_T: + case IXGBE_PHY_TYPE_LOW_25GBASE_T: + return ixgbe_media_type_copper; + case IXGBE_PHY_TYPE_LOW_10G_SFI_DA: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S: + case IXGBE_PHY_TYPE_LOW_25GBASE_CR1: + return ixgbe_media_type_da; + case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C: + if (ixgbe_is_media_cage_present(hw)) + return ixgbe_media_type_aui; + fallthrough; + case IXGBE_PHY_TYPE_LOW_1000BASE_KX: + case IXGBE_PHY_TYPE_LOW_2500BASE_KX: + case IXGBE_PHY_TYPE_LOW_2500BASE_X: + case IXGBE_PHY_TYPE_LOW_5GBASE_KR: + case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR1: + case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S: + return ixgbe_media_type_backplane; + } + } else { + switch (hw_link_info->phy_type_high) { + case IXGBE_PHY_TYPE_HIGH_10BASE_T: + return ixgbe_media_type_copper; + } + } + return ixgbe_media_type_unknown; +} + +/** + * ixgbe_update_link_info - update status of the HW network link + * @hw: pointer to the HW struct + * + * Update the status of the HW network link. + * + * Return: the exit code of the operation. + */ +int ixgbe_update_link_info(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps; + struct ixgbe_link_status *li; + int err; + + if (!hw) + return -EINVAL; + + li = &hw->link.link_info; + + err = ixgbe_aci_get_link_info(hw, true, NULL); + if (err) + return err; + + if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE)) + return 0; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, + pcaps); + + if (!err) + memcpy(li->module_type, &pcaps->module_type, + sizeof(li->module_type)); + + kfree(pcaps); + + return err; +} + +/** + * ixgbe_get_link_status - get status of the HW network link + * @hw: pointer to the HW struct + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up is true if link is up, false if link is down. + * The variable link_up is invalid if status is non zero. As a + * result of this call, link status reporting becomes enabled + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up) +{ + if (!hw || !link_up) + return -EINVAL; + + if (hw->link.get_link_info) { + int err = ixgbe_update_link_info(hw); + + if (err) + return err; + } + + *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP; + + return 0; +} + +/** + * ixgbe_aci_get_link_info - get the link status + * @hw: pointer to the HW struct + * @ena_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * + * Get the current Link Status using ACI command (0x607). + * The current link can be optionally provided to update + * the status. + * + * Return: the link status of the adapter. + */ +int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse, + struct ixgbe_link_status *link) +{ + struct ixgbe_aci_cmd_get_link_status_data link_data = {}; + struct ixgbe_aci_cmd_get_link_status *resp; + struct ixgbe_link_status *li_old, *li; + struct ixgbe_fc_info *hw_fc_info; + struct ixgbe_aci_desc desc; + bool tx_pause, rx_pause; + u8 cmd_flags; + int err; + + if (!hw) + return -EINVAL; + + li_old = &hw->link.link_info_old; + li = &hw->link.link_info; + hw_fc_info = &hw->fc; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status); + cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS; + resp = &desc.params.get_link_status; + resp->cmd_flags = cpu_to_le16(cmd_flags); + resp->lport_num = hw->bus.func; + + err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data)); + if (err) + return err; + + /* Save off old link status information. */ + *li_old = *li; + + /* Update current link status information. */ + li->link_speed = le16_to_cpu(link_data.link_speed); + li->phy_type_low = le64_to_cpu(link_data.phy_type_low); + li->phy_type_high = le64_to_cpu(link_data.phy_type_high); + li->link_info = link_data.link_info; + li->link_cfg_err = link_data.link_cfg_err; + li->an_info = link_data.an_info; + li->ext_info = link_data.ext_info; + li->max_frame_size = le16_to_cpu(link_data.max_frame_size); + li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK; + li->topo_media_conflict = link_data.topo_media_conflict; + li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M | + IXGBE_ACI_CFG_PACING_TYPE_M); + + /* Update fc info. */ + tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX); + rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX); + if (tx_pause && rx_pause) + hw_fc_info->current_mode = ixgbe_fc_full; + else if (tx_pause) + hw_fc_info->current_mode = ixgbe_fc_tx_pause; + else if (rx_pause) + hw_fc_info->current_mode = ixgbe_fc_rx_pause; + else + hw_fc_info->current_mode = ixgbe_fc_none; + + li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) & + IXGBE_ACI_LSE_IS_ENABLED); + + /* Save link status information. */ + if (link) + *link = *li; + + /* Flag cleared so calling functions don't call AQ again. */ + hw->link.get_link_info = false; + + return 0; +} + +/** + * ixgbe_aci_set_event_mask - set event mask + * @hw: pointer to the HW struct + * @port_num: port number of the physical function + * @mask: event mask to be set + * + * Set the event mask using ACI command (0x0613). + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask) +{ + struct ixgbe_aci_cmd_set_event_mask *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.set_event_mask; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = cpu_to_le16(mask); + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_configure_lse - enable/disable link status events + * @hw: pointer to the HW struct + * @activate: true for enable lse, false otherwise + * @mask: event mask to be set; a set bit means deactivation of the + * corresponding event + * + * Set the event mask and then enable or disable link status events + * + * Return: the exit code of the operation. + */ +int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask) +{ + int err; + + err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask); + if (err) + return err; + + /* Enabling link status events generation by fw. */ + return ixgbe_aci_get_link_info(hw, activate, NULL); +} + +/** + * ixgbe_get_media_type_e610 - Gets media type + * @hw: pointer to the HW struct + * + * In order to get the media type, the function gets PHY + * capabilities and later on use them to identify the PHY type + * checking phy_type_high and phy_type_low. + * + * Return: the type of media in form of ixgbe_media_type enum + * or ixgbe_media_type_unknown in case of an error. + */ +enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps; + int rc; + + rc = ixgbe_update_link_info(hw); + if (rc) + return ixgbe_media_type_unknown; + + /* If there is no link but PHY (dongle) is available SW should use + * Get PHY Caps admin command instead of Get Link Status, find most + * significant bit that is set in PHY types reported by the command + * and use it to discover media type. + */ + if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) && + (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) { + int highest_bit; + + /* Get PHY Capabilities */ + rc = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, + &pcaps); + if (rc) + return ixgbe_media_type_unknown; + + highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high)); + if (highest_bit) { + hw->link.link_info.phy_type_high = + BIT_ULL(highest_bit - 1); + hw->link.link_info.phy_type_low = 0; + } else { + highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low)); + if (highest_bit) + hw->link.link_info.phy_type_low = + BIT_ULL(highest_bit - 1); + } + } + + /* Based on link status or search above try to discover media type. */ + hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw); + + return hw->phy.media_type; +} + +/** + * ixgbe_setup_link_e610 - Set up link + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait: true when waiting for completion is needed + * + * Set up the link with the specified speed. + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + /* Simply request FW to perform proper PHY setup */ + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_check_link_e610 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Determine if the link is up and the current link speed + * using ACI command (0x0607). + * + * Return: the exit code of the operation. + */ +int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + int err; + u32 i; + + if (!speed || !link_up) + return -EINVAL; + + /* Set get_link_info flag to ensure that fresh + * link information will be obtained from FW + * by sending Get Link Status admin command. + */ + hw->link.get_link_info = true; + + /* Update link information in adapter context. */ + err = ixgbe_get_link_status(hw, link_up); + if (err) + return err; + + /* Wait for link up if it was requested. */ + if (link_up_wait_to_complete && !(*link_up)) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + msleep(100); + hw->link.get_link_info = true; + err = ixgbe_get_link_status(hw, link_up); + if (err) + return err; + if (*link_up) + break; + } + } + + /* Use link information in adapter context updated by the call + * to ixgbe_get_link_status() to determine current link speed. + * Link speed information is valid only when link up was + * reported by FW. + */ + if (*link_up) { + switch (hw->link.link_info.link_speed) { + case IXGBE_ACI_LINK_SPEED_10MB: + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + case IXGBE_ACI_LINK_SPEED_100MB: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + case IXGBE_ACI_LINK_SPEED_1000MB: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_2500MB: + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_5GB: + *speed = IXGBE_LINK_SPEED_5GB_FULL; + break; + case IXGBE_ACI_LINK_SPEED_10GB: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + break; + } + } else { + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * ixgbe_get_link_capabilities_e610 - Determine link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determine speed and AN parameters of a link. + * + * Return: the exit code of the operation. + */ +int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + if (!speed || !autoneg) + return -EINVAL; + + *autoneg = true; + *speed = hw->phy.speeds_supported; + + return 0; +} + +/** + * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode + * @hw: pointer to hardware structure + * @cfg: PHY configuration data to set FC mode + * @req_mode: FC mode to configure + * + * Configures PHY Flow Control according to the provided configuration. + * + * Return: the exit code of the operation. 
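
A sketch of a watchdog-style caller of ixgbe_check_link_e610() (illustrative only and not part of this diff; the netdev argument and the log message are hypothetical):

	static void example_watchdog_link_check(struct ixgbe_hw *hw,
						struct net_device *netdev)
	{
		ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
		bool link_up = false;

		/* false: do not busy-wait for the link to come up */
		if (ixgbe_check_link_e610(hw, &speed, &link_up, false))
			return;

		if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
			netdev_info(netdev, "10 Gbps link established\n");
	}
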
+ */ +int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg, + enum ixgbe_fc_mode req_mode) +{ + u8 pause_mask = 0x0; + + if (!cfg) + return -EINVAL; + + switch (req_mode) { + case ixgbe_fc_full: + pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; + pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; + break; + case ixgbe_fc_rx_pause: + pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; + break; + case ixgbe_fc_tx_pause: + pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; + break; + default: + break; + } + + /* Clear the old pause settings. */ + cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE | + IXGBE_ACI_PHY_EN_RX_LINK_PAUSE); + + /* Set the new capabilities. */ + cfg->caps |= pause_mask; + + return 0; +} + +/** + * ixgbe_setup_fc_e610 - Set up flow control + * @hw: pointer to hardware structure + * + * Set up flow control. This has to be done during init time. + * + * Return: the exit code of the operation. + */ +int ixgbe_setup_fc_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {}; + struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {}; + int err; + + /* Get the current PHY config */ + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg); + + /* Configure the set PHY data */ + err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode); + if (err) + return err; + + /* If the capabilities have changed, then set the new config */ + if (cfg.caps != pcaps.caps) { + cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; + + err = ixgbe_aci_set_phy_cfg(hw, &cfg); + if (err) + return err; + } + + return err; +} + +/** + * ixgbe_fc_autoneg_e610 - Configure flow control + * @hw: pointer to hardware structure + * + * Configure Flow Control. + */ +void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw) +{ + int err; + + /* Get current link err. + * Current FC mode will be stored in the hw context. + */ + err = ixgbe_aci_get_link_info(hw, false, NULL); + if (err) + goto no_autoneg; + + /* Check if the link is up */ + if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) + goto no_autoneg; + + /* Check if auto-negotiation has completed */ + if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) + goto no_autoneg; + + hw->fc.fc_was_autonegged = true; + return; + +no_autoneg: + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; +} + +/** + * ixgbe_disable_rx_e610 - Disable RX unit + * @hw: pointer to hardware structure + * + * Disable RX DMA unit on E610 with use of ACI command (0x000C). + * + * Return: the exit code of the operation. 
+ */ +void ixgbe_disable_rx_e610(struct ixgbe_hw *hw) +{ + u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + u32 pfdtxgswc; + int err; + + if (!(rxctrl & IXGBE_RXCTRL_RXEN)) + return; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + err = ixgbe_aci_disable_rxen(hw); + + /* If we fail - disable RX using register write */ + if (err) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } +} + +/** + * ixgbe_init_phy_ops_e610 - PHY specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY type was not known. + * + * Return: the exit code of the operation. + */ +int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + phy->ops.set_phy_power = ixgbe_set_phy_power_e610; + else + phy->ops.set_phy_power = NULL; + + /* Identify the PHY */ + return phy->ops.identify(hw); +} + +/** + * ixgbe_identify_phy_e610 - Identify PHY + * @hw: pointer to hardware structure + * + * Determine PHY type, supported speeds and PHY ID. + * + * Return: the exit code of the operation. + */ +int ixgbe_identify_phy_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps; + u64 phy_type_low, phy_type_high; + int err; + + /* Set PHY type */ + hw->phy.type = ixgbe_phy_fw; + + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps); + if (err) + return err; + + if (!(pcaps.module_compliance_enforcement & + IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) { + /* Handle lenient mode */ + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA, + &pcaps); + if (err) + return err; + } + + /* Determine supported speeds */ + hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN; + phy_type_high = le64_to_cpu(pcaps.phy_type_high); + phy_type_low = le64_to_cpu(pcaps.phy_type_low); + + if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T || + phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX || + phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII || + phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX || + phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII || + phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR || + phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || + phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C || + phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + + /* 2.5 and 5 Gbps link speeds must be excluded 
from the + * auto-negotiation set used during driver initialization due to + * compatibility issues with certain switches. Those issues do not + * exist in case of E610 2.5G SKU device (0x57b1). + */ + if (!hw->phy.autoneg_advertised && + hw->device_id != IXGBE_DEV_ID_E610_2_5G_T) + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + + if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X || + phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX || + phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII || + phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + + if (!hw->phy.autoneg_advertised && + hw->device_id == IXGBE_DEV_ID_E610_2_5G_T) + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + + if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T || + phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR || + phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + + /* Set PHY ID */ + memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32)); + + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL | + IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + + return 0; +} + +/** + * ixgbe_identify_module_e610 - Identify SFP module type + * @hw: pointer to hardware structure + * + * Identify the SFP module type. + * + * Return: the exit code of the operation. + */ +int ixgbe_identify_module_e610(struct ixgbe_hw *hw) +{ + bool media_available; + u8 module_type; + int err; + + err = ixgbe_update_link_info(hw); + if (err) + return err; + + media_available = + (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE); + + if (media_available) { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + + /* Get module type from hw context updated by + * ixgbe_update_link_info() + */ + module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT]; + + if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) || + (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) { + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) { + hw->phy.sfp_type = ixgbe_sfp_type_sr; + } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) || + (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) { + hw->phy.sfp_type = ixgbe_sfp_type_lr; + } + } else { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return -ENOENT; + } + + return 0; +} + +/** + * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs + * @hw: pointer to hardware structure + * + * Set the parameters for the firmware-controlled PHYs. + * + * Return: the exit code of the operation. + */ +int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data pcaps; + struct ixgbe_aci_cmd_set_phy_cfg_data pcfg; + u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA; + u64 sup_phy_type_low, sup_phy_type_high; + u64 phy_type_low = 0, phy_type_high = 0; + int err; + + err = ixgbe_aci_get_link_info(hw, false, NULL); + if (err) + return err; + + /* If media is not available get default config. */ + if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) + rmode = IXGBE_ACI_REPORT_DFLT_CFG; + + err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps); + if (err) + return err; + + sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low); + sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high); + + /* Get Active configuration to avoid unintended changes. 
*/ + err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG, + &pcaps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg); + + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) { + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX; + phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X; + phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII; + } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) { + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC; + phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C; + phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII; + } + + /* Mask the set values to avoid requesting unsupported link types. */ + phy_type_low &= sup_phy_type_low; + pcfg.phy_type_low = cpu_to_le64(phy_type_low); + phy_type_high &= sup_phy_type_high; + pcfg.phy_type_high = cpu_to_le64(phy_type_high); + + if (pcfg.phy_type_high != pcaps.phy_type_high || + pcfg.phy_type_low != pcaps.phy_type_low || + pcfg.caps != pcaps.caps) { + pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK; + pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; + + err = ixgbe_aci_set_phy_cfg(hw, &pcfg); + if (err) + return err; + } + + return 0; +} + +/** + * ixgbe_set_phy_power_e610 - Control power for copper PHY + * @hw: pointer to hardware structure + * @on: true for on, false for off + * + * Set the power on/off of the PHY + * by getting its capabilities and setting the appropriate + * configuration parameters. + * + * Return: the exit code of the operation. + */ +int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on) +{ + struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {}; + struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {}; + int err; + + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_ACTIVE_CFG, + &phy_caps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg); + + if (on) + phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER; + else + phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER; + + /* PHY is already in requested power mode. 
*/ + if (phy_caps.caps == phy_cfg.caps) + return 0; + + phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK; + phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; + + return ixgbe_aci_set_phy_cfg(hw, &phy_cfg); +} + +/** + * ixgbe_enter_lplu_e610 - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. + * + * Return: the exit code of the operation. + */ +int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {}; + struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {}; + int err; + + err = ixgbe_aci_get_phy_caps(hw, false, + IXGBE_ACI_REPORT_ACTIVE_CFG, + &phy_caps); + if (err) + return err; + + ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg); + + phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG; + + return ixgbe_aci_set_phy_cfg(hw, &phy_cfg); +} + +/** + * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw + * struct in order to set up EEPROM access. + * + * Return: the operation exit code. + */ +int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 gens_stat; + u8 sr_size; + + if (eeprom->type != ixgbe_eeprom_uninitialized) + return 0; + + eeprom->type = ixgbe_flash; + + gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS); + sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); + + /* Switching to words (sr_size contains power of 2). */ + eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB; + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, + eeprom->word_size); + + return 0; +} + +/** + * ixgbe_aci_get_netlist_node - get a node handle + * @hw: pointer to the hw struct + * @cmd: get_link_topo AQ structure + * @node_part_number: output node part number if node found + * @node_handle: output node handle parameter if node found + * + * Get the netlist node and assigns it to + * the provided handle using ACI command (0x06E0). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle) +{ + struct ixgbe_aci_desc desc; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo); + desc.params.get_link_topo = *cmd; + + if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0)) + return -EOPNOTSUPP; + + if (node_handle) + *node_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + if (node_part_number) + *node_part_number = desc.params.get_link_topo.node_part_num; + + return 0; +} + +/** + * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * Request NVM ownership. + * + * Return: the exit code of the operation. + */ +int ixgbe_acquire_nvm(struct ixgbe_hw *hw, + enum ixgbe_aci_res_access_type access) +{ + u32 fla; + + /* Skip if we are in blank NVM programming mode */ + fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); + if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0) + return 0; + + return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access, + IXGBE_NVM_TIMEOUT); +} + +/** + * ixgbe_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * Release NVM ownership. 
+ */ +void ixgbe_release_nvm(struct ixgbe_hw *hw) +{ + u32 fla; + + /* Skip if we are in blank NVM programming mode */ + fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); + if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0) + return; + + ixgbe_release_res(hw, IXGBE_NVM_RES_ID); +} + +/** + * ixgbe_aci_read_nvm - read NVM + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @read_shadow_ram: tell if this is a shadow RAM read + * + * Read the NVM using ACI command (0x0701). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram) +{ + struct ixgbe_aci_cmd_nvm *cmd; + struct ixgbe_aci_desc desc; + + if (offset > IXGBE_ACI_NVM_MAX_OFFSET) + return -EINVAL; + + cmd = &desc.params.nvm; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read); + + if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT) + cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY; + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD; + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; + cmd->length = cpu_to_le16(length); + + return ixgbe_aci_send_cmd(hw, &desc, data, length); +} + +/** + * ixgbe_nvm_validate_checksum - validate checksum + * @hw: pointer to the HW struct + * + * Verify NVM PFA checksum validity using ACI command (0x0706). + * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned. + * The function acquires and then releases the NVM ownership. + * + * Return: the exit code of the operation. + */ +int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_nvm_checksum *cmd; + struct ixgbe_aci_desc desc; + int err; + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + cmd = &desc.params.nvm_checksum; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum); + cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY; + + err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); + + ixgbe_release_nvm(hw); + + if (!err && cmd->checksum != + cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) { + struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, + hw); + + err = -EIO; + netdev_err(adapter->netdev, "Invalid Shadow Ram checksum"); + } + + return err; +} + +/** + * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm. + * + * Return: the exit code of the operation. 
+ */ +int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + u32 bytes = sizeof(u16); + u16 data_local; + int err; + + err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes, + (u8 *)&data_local, true); + if (err) + return err; + + *data = data_local; + return 0; +} + +/** + * ixgbe_read_flat_nvm - Read portion of NVM by flat offset + * @hw: pointer to the HW struct + * @offset: offset from beginning of NVM + * @length: (in) number of bytes to read; (out) number of bytes actually read + * @data: buffer to return data in (sized to fit the specified length) + * @read_shadow_ram: if true, read from shadow RAM instead of NVM + * + * Reads a portion of the NVM, as a flat memory space. This function correctly + * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size + * from being exceeded in case of Shadow RAM read requests and ensures that no + * single read request exceeds the maximum 4KB read for a single admin command. + * + * Returns an error code on failure. Note that the data pointer may be + * partially updated if some reads succeed before a failure. + * + * Return: the exit code of the operation. + */ +int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, + u8 *data, bool read_shadow_ram) +{ + u32 inlen = *length; + u32 bytes_read = 0; + bool last_cmd; + int err; + + /* Verify the length of the read if this is for the Shadow RAM */ + if (read_shadow_ram && ((offset + inlen) > + (hw->eeprom.word_size * 2u))) + return -EINVAL; + + do { + u32 read_size, sector_offset; + + /* ixgbe_aci_read_nvm cannot read more than 4KB at a time. + * Additionally, a read from the Shadow RAM may not cross over + * a sector boundary. Conveniently, the sector size is also 4KB. + */ + sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE; + read_size = min_t(u32, + IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset, + inlen - bytes_read); + + last_cmd = !(bytes_read + read_size < inlen); + + /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size + * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE + * maximum size guarantees that it will fit within the 2 bytes. + */ + err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram); + if (err) + break; + + bytes_read += read_size; + offset += read_size; + } while (!last_cmd); + + *length = bytes_read; + return err; +} + +/** + * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM + * ownership. + * + * Return: the operation exit code. + */ +int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, + u16 *data) +{ + u32 bytes = *words * 2; + int err; + + err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); + if (err) + return err; + + *words = bytes / 2; + + for (int i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + + return 0; +} + +/** + * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the ACI. 
+ * If the EEPROM params are not initialized, the function + * initialize them before proceeding with reading. + * The function acquires and then releases the NVM ownership. + * + * Return: the exit code of the operation. + */ +int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + int err; + + if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { + err = hw->eeprom.ops.init_params(hw); + if (err) + return err; + } + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + err = ixgbe_read_sr_word_aci(hw, offset, data); + ixgbe_release_nvm(hw); + + return err; +} + +/** + * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI + * @hw: pointer to hardware structure + * @offset: offset of words in the EEPROM to read + * @words: number of words to read + * @data: words to read from the EEPROM + * + * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params + * prior to the read. Acquire/release the NVM ownership. + * + * Return: the operation exit code. + */ +int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + int err; + + if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { + err = hw->eeprom.ops.init_params(hw); + if (err) + return err; + } + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + err = ixgbe_read_sr_buf_aci(hw, offset, &words, data); + ixgbe_release_nvm(hw); + + return err; +} + +/** + * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + * If the EEPROM params are not initialized, the function + * initialize them before proceeding. + * The function acquires and then releases the NVM ownership. + * + * Return: the exit code of the operation. + */ +int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val) +{ + int err; + + if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { + err = hw->eeprom.ops.init_params(hw); + if (err) + return err; + } + + err = ixgbe_nvm_validate_checksum(hw); + if (err) + return err; + + if (checksum_val) { + u16 tmp_checksum; + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD, + &tmp_checksum); + ixgbe_release_nvm(hw); + + if (!err) + *checksum_val = tmp_checksum; + } + + return err; +} + +/** + * ixgbe_reset_hw_e610 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, and performs a reset. + * + * Return: the exit code of the operation. + */ +int ixgbe_reset_hw_e610(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 ctrl, i; + int err; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + err = hw->mac.ops.stop_adapter(hw); + if (err) + goto reset_hw_out; + + /* Flush pending Tx transactions. 
*/ + ixgbe_clear_tx_pending(hw); + + hw->phy.ops.init(hw); +mac_reset_top: + err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (err) + return -EBUSY; + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, + hw); + + err = -EIO; + netdev_err(adapter->netdev, "Reset polling failed to complete."); + } + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + msleep(100); + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17)); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Maximum number of Receive Address Registers. */ +#define IXGBE_MAX_NUM_RAR 128 + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to the + * maximum number of Receive Address Registers, since we modify this + * value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR; + hw->mac.ops.init_rx_addrs(hw); + + /* Initialize bus function number */ + hw->mac.ops.set_lan_id(hw); + +reset_hw_out: + return err; +} + +static const struct ixgbe_mac_operations mac_ops_e610 = { + .init_hw = ixgbe_init_hw_generic, + .start_hw = ixgbe_start_hw_X540, + .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic, + .enable_rx_dma = ixgbe_enable_rx_dma_generic, + .get_mac_addr = ixgbe_get_mac_addr_generic, + .get_device_caps = ixgbe_get_device_caps_generic, + .stop_adapter = ixgbe_stop_adapter_generic, + .set_lan_id = ixgbe_set_lan_id_multi_port_pcie, + .set_rxpba = ixgbe_set_rxpba_generic, + .check_link = ixgbe_check_link_e610, + .blink_led_start = ixgbe_blink_led_start_X540, + .blink_led_stop = ixgbe_blink_led_stop_X540, + .set_rar = ixgbe_set_rar_generic, + .clear_rar = ixgbe_clear_rar_generic, + .set_vmdq = ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = ixgbe_clear_vmdq_generic, + .init_rx_addrs = ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = ixgbe_update_mc_addr_list_generic, + .enable_mc = ixgbe_enable_mc_generic, + .disable_mc = ixgbe_disable_mc_generic, + .clear_vfta = ixgbe_clear_vfta_generic, + .set_vfta = ixgbe_set_vfta_generic, + .fc_enable = ixgbe_fc_enable_generic, + .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550, + .init_uta_tables = ixgbe_init_uta_tables_generic, + .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing, + .set_source_address_pruning = + ixgbe_set_source_address_pruning_x550, + .set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_x550, + .disable_rx_buff = ixgbe_disable_rx_buff_generic, + .enable_rx_buff = ixgbe_enable_rx_buff_generic, + .enable_rx = ixgbe_enable_rx_generic, + .disable_rx = ixgbe_disable_rx_e610, + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + 
.init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_e610, + .get_media_type = ixgbe_get_media_type_e610, + .setup_link = ixgbe_setup_link_e610, + .get_link_capabilities = ixgbe_get_link_capabilities_e610, + .get_bus_info = ixgbe_get_bus_info_generic, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = ixgbe_release_swfw_sync_X540, + .init_swfw_sync = ixgbe_init_swfw_sync_X540, + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_e610, + .fc_autoneg = ixgbe_fc_autoneg_e610, +}; + +static const struct ixgbe_phy_operations phy_ops_e610 = { + .init = ixgbe_init_phy_ops_e610, + .identify = ixgbe_identify_phy_e610, + .identify_sfp = ixgbe_identify_module_e610, + .setup_link_speed = ixgbe_setup_phy_link_speed_generic, + .setup_link = ixgbe_setup_phy_link_e610, + .enter_lplu = ixgbe_enter_lplu_e610, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_e610 = { + .read = ixgbe_read_ee_aci_e610, + .read_buffer = ixgbe_read_ee_aci_buffer_e610, + .validate_checksum = ixgbe_validate_eeprom_checksum_e610, +}; + +const struct ixgbe_info ixgbe_e610_info = { + .mac = ixgbe_mac_e610, + .get_invariants = ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_e610, + .eeprom_ops = &eeprom_ops_e610, + .phy_ops = &phy_ops_e610, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h new file mode 100644 index 000000000000..ba8c06b73810 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_E610_H_ +#define _IXGBE_E610_H_ + +#include "ixgbe_type.h" + +int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc, + void *buf, u16 buf_size); +bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw); +int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, + bool *pending); +void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode); +int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, + enum ixgbe_aci_res_access_type access, u32 timeout); +void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res); +int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size, + u32 *cap_count, enum ixgbe_aci_opc opc); +int ixgbe_discover_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_dev_caps *dev_caps); +int ixgbe_discover_func_caps(struct ixgbe_hw *hw, + struct ixgbe_hw_func_caps *func_caps); +int ixgbe_get_caps(struct ixgbe_hw *hw); +int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw); +int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, + struct ixgbe_aci_cmd_get_phy_caps_data *pcaps); +void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg); +int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg); +int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link); +int ixgbe_update_link_info(struct ixgbe_hw *hw); +int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up); +int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse, + struct ixgbe_link_status *link); +int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask); +int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask); +enum ixgbe_media_type 
ixgbe_get_media_type_e610(struct ixgbe_hw *hw); +int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait); +int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_set_phy_cfg_data *cfg, + enum ixgbe_fc_mode req_mode); +int ixgbe_setup_fc_e610(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw); +void ixgbe_disable_rx_e610(struct ixgbe_hw *hw); +int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw); +int ixgbe_identify_phy_e610(struct ixgbe_hw *hw); +int ixgbe_identify_module_e610(struct ixgbe_hw *hw); +int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw); +int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on); +int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw); +int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw); +int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw, + struct ixgbe_aci_cmd_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle); +int ixgbe_acquire_nvm(struct ixgbe_hw *hw, + enum ixgbe_aci_res_access_type access); +void ixgbe_release_nvm(struct ixgbe_hw *hw); +int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram); +int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw); +int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data); +int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, + u8 *data, bool read_shadow_ram); +int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, + u16 *data); +int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data); +int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val); +int ixgbe_reset_hw_e610(struct ixgbe_hw *hw); + +#endif /* _IXGBE_E610_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9482e0cca8b7..da91c582d439 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ /* ethtool support for ixgbe */ @@ -690,6 +690,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -1613,6 +1614,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1874,6 +1876,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1935,6 +1938,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 16fa621ce0ff..336d47ffb95a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" @@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2e38e8f6fac1..467f81239e12 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #include <linux/types.h> #include <linux/module.h> @@ -42,6 +42,7 @@ #include "ixgbe.h" #include "ixgbe_common.h" +#include "ixgbe_e610.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_mbx.h" #include "ixgbe_phy.h" @@ -73,6 +74,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, [board_x550em_a] = &ixgbe_x550em_a_info, [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, + [board_e610] = &ixgbe_e610_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -131,6 +133,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610}, /* required last entry */ {0, } }; @@ -173,6 +180,8 @@ static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *); +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *); static const struct net_device_ops ixgbe_netdev_ops; @@ -236,8 +245,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) * bandwidth details should be gathered from the parent bus instead of from the * device. Used to ensure that various locations all have the correct device ID * checks. + * + * Return: true if information should be collected from the parent bus, false + * otherwise */ -static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) +static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: @@ -876,6 +888,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -915,6 +928,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1025,7 +1039,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) return ((head <= tail) ? tail : tail + ring->count) - head; } -static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) { u32 tx_done = ixgbe_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; @@ -1909,10 +1923,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, { struct net_device *netdev = rx_ring->netdev; - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - /* Verify netdev is present, and that packet does not have any * errors that would be unacceptable to the netdev. 
*/ @@ -2095,7 +2105,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { - if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { + if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) { /* the page has been released from the ring */ IXGBE_CB(skb)->page_released = true; } else { @@ -2220,9 +2230,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - struct xdp_buff *xdp) +static int ixgbe_run_xdp(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; @@ -2272,7 +2282,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, @@ -2330,6 +2340,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int offset = rx_ring->rx_offset; unsigned int xdp_xmit = 0; struct xdp_buff xdp; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -2375,12 +2386,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); #endif - skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); + xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -2400,7 +2409,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; @@ -2414,7 +2423,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, continue; /* verify the packet layout is correct */ - if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) + if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) continue; /* probably a little skewed due to removing CRC */ @@ -2515,6 +2524,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2528,6 +2538,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) IXGBE_EIMS_MAILBOX | IXGBE_EIMS_LSC); + if (adapter->hw.mac.type == ixgbe_mac_e610) + mask &= ~IXGBE_EIMS_FW_EVENT; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } @@ -2744,6 +2757,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2966,6 +2980,218 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_check_phy_fw_load - check if PHY FW load failed + * @adapter: pointer to adapter structure + * @link_cfg_err: bitmap from the link info structure + * + * Check if external PHY FW load failed and print an error message if it did. 
+ */ +static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter, + u8 link_cfg_err) +{ + if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) { + adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED; + return; + } + + if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED) + return; + + if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) { + netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n"); + adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED; + } +} + +/** + * ixgbe_check_module_power - check module power level + * @adapter: pointer to adapter structure + * @link_cfg_err: bitmap from the link info structure + * + * Check module power level returned by a previous call to aci_get_link_info + * and print error messages if module power level is not supported. + */ +static void ixgbe_check_module_power(struct ixgbe_adapter *adapter, + u8 link_cfg_err) +{ + /* If module power level is supported, clear the flag. */ + if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT | + IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) { + adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; + return; + } + + /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the + * above block didn't clear this bit, there's nothing to do. + */ + if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED) + return; + + if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) { + netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n"); + adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; + } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) { + netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n"); + adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; + } +} + +/** + * ixgbe_check_link_cfg_err - check if link configuration failed + * @adapter: pointer to adapter structure + * @link_cfg_err: bitmap from the link info structure + * + * Print if any link configuration failure happens due to the value in the + * link_cfg_err parameter in the link info structure. + */ +static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter, + u8 link_cfg_err) +{ + ixgbe_check_module_power(adapter, link_cfg_err); + ixgbe_check_phy_fw_load(adapter, link_cfg_err); +} + +/** + * ixgbe_process_link_status_event - process the link event + * @adapter: pointer to adapter structure + * @link_up: true if the physical link is up and false if it is down + * @link_speed: current link speed received from the link event + * + * Return: 0 on success or negative value on failure. + */ +static int +ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up, + u16 link_speed) +{ + struct ixgbe_hw *hw = &adapter->hw; + int status; + + /* Update the link info structures and re-enable link events, + * don't bail on failure due to other book keeping needed. + */ + status = ixgbe_update_link_info(hw); + if (status) + e_dev_err("Failed to update link status, err %d aq_err %d\n", + status, hw->aci.last_status); + + ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err); + + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) + link_up = true; + + /* Turn off PHY if media was removed. 
*/ + if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) && + !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) + adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA; + + if (link_up == adapter->link_up && + link_up == netif_carrier_ok(adapter->netdev) && + link_speed == adapter->link_speed) + return 0; + + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + ixgbe_watchdog_update_link(adapter); + + if (link_up) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); + + return 0; +} + +/** + * ixgbe_handle_link_status_event - handle link status event via ACI + * @adapter: pointer to adapter structure + * @e: event structure containing link status info + */ +static void +ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter, + struct ixgbe_aci_event *e) +{ + struct ixgbe_aci_cmd_get_link_status_data *link_data; + u16 link_speed; + bool link_up; + + link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf; + + link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP); + link_speed = le16_to_cpu(link_data->link_speed); + + if (ixgbe_process_link_status_event(adapter, link_up, link_speed)) + e_dev_warn("Could not process link status event"); +} + +/** + * ixgbe_schedule_fw_event - schedule Firmware event + * @adapter: pointer to the adapter structure + * + * If the adapter is not in down, removing or resetting state, + * an event is scheduled. + */ +static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter) +{ + if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_bit(__IXGBE_REMOVING, &adapter->state) && + !test_bit(__IXGBE_RESETTING, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT; + ixgbe_service_event_schedule(adapter); + } +} + +/** + * ixgbe_aci_event_cleanup - release msg_buf memory + * @event: pointer to the event holding msg_buf to be released + * + * Clean memory allocated for event's msg_buf. Implements auto memory cleanup. + */ +static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event) +{ + kfree(event->msg_buf); +} + +/** + * ixgbe_handle_fw_event - handle Firmware event + * @adapter: pointer to the adapter structure + * + * Obtain an event from the ACI and then process it according to the + * type of the event and the opcode.
+ */ +static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter) +{ + struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup); + struct ixgbe_hw *hw = &adapter->hw; + bool pending = false; + int err; + + if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT) + adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT; + event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return; + + do { + err = ixgbe_aci_get_event(hw, &event, &pending); + if (err) + break; + + switch (le16_to_cpu(event.desc.opcode)) { + case ixgbe_aci_opc_get_link_status: + ixgbe_handle_link_status_event(adapter, &event); + break; + default: + e_warn(hw, "unknown FW async event captured\n"); + break; + } + } while (pending); +} + static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { @@ -2982,6 +3208,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -3035,6 +3262,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_e610: + mask |= IXGBE_EIMS_FW_EVENT; + fallthrough; case ixgbe_mac_x550em_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || @@ -3091,12 +3321,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) if (eicr & IXGBE_EICR_MAILBOX) ixgbe_msg_task(adapter); + if (eicr & IXGBE_EICR_FW_EVENT) + ixgbe_schedule_fw_event(adapter); + switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -3334,6 +3568,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data) if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); + if (eicr & IXGBE_EICR_FW_EVENT) + ixgbe_schedule_fw_event(adapter); + switch (hw->mac.type) { case ixgbe_mac_82599EB: ixgbe_check_sfp_event(adapter, eicr); @@ -3342,6 +3579,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); @@ -3442,6 +3680,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -4359,6 +4598,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; fallthrough; @@ -4526,6 +4766,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4564,6 +4805,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) 
case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -5148,6 +5390,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -5208,6 +5451,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -5510,6 +5754,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) } /** + * ixgbe_enable_link_status_events - enable link status events + * @adapter: pointer to the adapter structure + * @mask: event mask to be set + * + * Enables link status events by invoking ixgbe_configure_lse() + * + * Return: the exit code of the operation. + */ +static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter, + u16 mask) +{ + int err; + + err = ixgbe_configure_lse(&adapter->hw, true, mask); + if (err) + return err; + + adapter->lse_mask = mask; + return 0; +} + +/** + * ixgbe_disable_link_status_events - disable link status events + * @adapter: pointer to the adapter structure + * + * Disables link status events by invoking ixgbe_configure_lse() + * + * Return: the exit code of the operation. + */ +static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter) +{ + int err; + + err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask); + if (err) + return err; + + adapter->lse_mask = 0; + return 0; +} + +/** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct **/ @@ -5532,13 +5818,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) * ixgbe_non_sfp_link_config - set up non-SFP+ link * @hw: pointer to private hardware struct * - * Returns 0 on success, negative on failure + * Configure non-SFP link. + * + * Return: 0 on success, negative on failure **/ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { - u32 speed; + struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, + hw); + u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN | + IXGBE_ACI_LINK_EVENT_MEDIA_NA | + IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL | + IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL)); bool autoneg, link_up = false; int ret = -EIO; + u32 speed; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); @@ -5561,13 +5855,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) if (ret) return ret; - if (hw->mac.ops.setup_link) + if (hw->mac.ops.setup_link) { + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ret = ixgbe_enable_link_status_events(adapter, mask); + if (ret) + return ret; + } ret = hw->mac.ops.setup_link(hw, speed, link_up); + } return ret; } /** + * ixgbe_check_media_subtask - check for media + * @adapter: pointer to adapter structure + * + * If media is available then initialize PHY user configuration. Configure the + * PHY if the interface is up. 
+ */ +static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* No need to check for media if it's already present */ + if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA)) + return; + + /* Refresh link info and check if media is present */ + if (ixgbe_update_link_info(hw)) + return; + + ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err); + + if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) { + /* PHY settings are reset on media insertion, reconfigure + * PHY to preserve settings. + */ + if (!(ixgbe_non_sfp_link_config(&adapter->hw))) + adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA; + + /* A Link Status Event will be generated; the event handler + * will complete bringing the interface up + */ + } +} + +/** + * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset + * @adapter: board private structure + * @@ -5630,6 +5964,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5980,6 +6315,7 @@ dma_engine_disable: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -6224,6 +6560,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); + if (adapter->hw.mac.type == ixgbe_mac_e610) + ixgbe_disable_link_status_events(adapter); } /** @@ -6279,6 +6617,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X540: case ixgbe_mac_X550: + case ixgbe_mac_e610: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; @@ -6342,6 +6681,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; + hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; + /* get_invariants needs the device IDs */ ii->get_invariants(hw); @@ -6909,6 +7250,19 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); udp_tunnel_nic_reset_ntf(netdev); + if (adapter->hw.mac.type == ixgbe_mac_e610) { + int err = ixgbe_update_link_info(&adapter->hw); + + if (err) + e_dev_err("Failed to update link info, err %d.\n", err); + + ixgbe_check_link_cfg_err(adapter, + adapter->hw.link.link_info.link_cfg_err); + + err = ixgbe_non_sfp_link_config(&adapter->hw); + if (err) + e_dev_err("Link setup failed, err %d.\n", err); + } return 0; @@ -7062,6 +7416,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -7209,6 +7564,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -7221,11 +7577,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) for (i = 0; i < 16; i++) { hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540) || - (hw->mac.type ==
ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x) || - (hw->mac.type == ixgbe_mac_x550em_a)) { + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_x550em_a || + hw->mac.type == ixgbe_mac_e610) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -7251,6 +7608,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); @@ -7551,6 +7909,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -8052,6 +8411,11 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } + if (adapter->hw.mac.type == ixgbe_mac_e610) { + if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT) + ixgbe_handle_fw_event(adapter); + ixgbe_check_media_subtask(adapter); + } ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@ -10771,6 +11135,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } /** + * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters + * @adapter: the adapter private structure + * + * This function is used by probe and ethtool to determine the FW version to + * format and display. The FW version is taken from the EEPROM/NVM.
+ * + */ +static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter) +{ + struct ixgbe_orom_info *orom = &adapter->hw.flash.orom; + struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, + nvm->eetrack, orom->major, orom->build, orom->patch); +} + +/** * ixgbe_set_fw_version - Set FW version * @adapter: the adapter private structure * @@ -10782,6 +11164,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_nvm_version nvm_ver; + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ixgbe_set_fw_version_e610(adapter); + return; + } + ixgbe_get_oem_prod_version(hw, &nvm_ver); if (nvm_ver.oem_valid) { snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), @@ -10868,6 +11255,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #else indices = IXGBE_MAX_RSS_INDICES; #endif + } else if (ii->mac == ixgbe_mac_e610) { + indices = IXGBE_MAX_RSS_INDICES_X550; } netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); @@ -10933,12 +11322,19 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + if (adapter->hw.mac.type == ixgbe_mac_e610) { + err = ixgbe_get_caps(&adapter->hw); + if (err) + dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err); + } + if (adapter->hw.mac.type == ixgbe_mac_82599EB) adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_e610: netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; break; case ixgbe_mac_x550em_a: @@ -10959,6 +11355,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -11130,6 +11527,8 @@ skip_sriov: ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ixgbe_mac_set_default_filter(adapter); + if (hw->mac.type == ixgbe_mac_e610) + mutex_init(&hw->aci.lock); timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); if (ixgbe_removed(hw->hw_addr)) { @@ -11275,6 +11674,8 @@ err_netdev: err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); + if (hw->mac.type == ixgbe_mac_e610) + mutex_destroy(&adapter->hw.aci.lock); err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; @@ -11321,6 +11722,11 @@ static void ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); + if (adapter->hw.mac.type == ixgbe_mac_e610) { + ixgbe_disable_link_status_events(adapter); + mutex_destroy(&adapter->hw.aci.lock); + } + if (adapter->mii_bus) mdiobus_unregister(adapter->mii_bus); @@ -11452,6 +11858,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_x550em_a: device_id = IXGBE_DEV_ID_X550EM_A_VF; break; + case ixgbe_mac_e610: + device_id = IXGBE_DEV_ID_E610_VF; + break; default: device_id = 0; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index d67d77e5dacc..788b5af07c70 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_x550em_a && + hw->mac.type != ixgbe_mac_e610 && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 07eaa3c3f4d3..0a03a8bb5f88 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, MDIO_MMD_AN, &autoneg_reg); - if (hw->mac.type == ixgbe_mac_X550) { + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) { /* Set or unset auto-negotiation 5G advertisement */ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && @@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_e610: hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9baccacd02a1..5fdf32d79d82 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/mdio.h> #include <linux/netdevice.h> +#include "ixgbe_type_e610.h" /* Device IDs */ #define IXGBE_DEV_ID_82598 0x10B6 @@ -71,12 +72,19 @@ #define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 #define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE +#define IXGBE_DEV_ID_E610_SFP 0x57AF +#define IXGBE_DEV_ID_E610_10G_T 0x57B0 +#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1 +#define IXGBE_DEV_ID_E610_SGMII 0x57B2 + /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 +#define IXGBE_DEV_ID_E610_VF 0x57AD #define IXGBE_CAT(r, m) IXGBE_##r##_##m @@ -1600,7 +1608,7 @@ enum { #define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ #define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */ #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ #define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ @@ -1636,6 +1644,7 @@ enum { #define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) @@ -1654,6 +1663,7 @@ enum { #define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ #define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ @@ -1673,6 +1683,7 @@ enum { #define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ #define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) @@ -2068,6 +2079,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PTR 0x28 #define IXGBE_DEVICE_CAPS 0x2C #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2 #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 #define IXGBE_MAX_MSIX_VECTORS_82599 0x40 #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 @@ -2168,6 +2180,7 @@ enum { #define IXGBE_PCI_DEVICE_STATUS 0xAA #define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 #define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_LINK_STATUS_E610 0x82 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 #define IXGBE_PCI_LINK_WIDTH 0x3F0 #define IXGBE_PCI_LINK_WIDTH_1 0x10 @@ -2288,6 +2301,7 @@ enum { #define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define 
IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/ #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ #define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ @@ -2351,6 +2365,7 @@ enum { /* Multiple Transmit Queue Command Register */ #define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ #define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */ #define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ #define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ #define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ @@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed; IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000 +#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000 + /* Flow Control Data Sheet defined values * Calculation and defines taken from 802.1bb Annex O */ @@ -3145,6 +3183,8 @@ enum ixgbe_mac_type { ixgbe_mac_X550, ixgbe_mac_X550EM_x, ixgbe_mac_x550em_a, + ixgbe_mac_e610, + ixgbe_mac_e610_vf, ixgbe_num_macs }; @@ -3224,7 +3264,9 @@ enum ixgbe_media_type { ixgbe_media_type_copper, ixgbe_media_type_backplane, ixgbe_media_type_cx4, - ixgbe_media_type_virtual + ixgbe_media_type_virtual, + ixgbe_media_type_da, + ixgbe_media_type_aui, }; /* Flow Control Settings */ @@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode { ixgbe_fc_rx_pause, ixgbe_fc_tx_pause, ixgbe_fc_full, - ixgbe_fc_default + ixgbe_fc_default, + ixgbe_fc_pfc, }; /* Smart Speed Settings */ @@ -3533,6 +3576,9 @@ struct ixgbe_link_operations { struct ixgbe_link_info { struct ixgbe_link_operations ops; u8 addr; + struct ixgbe_link_status link_info; + struct ixgbe_link_status link_info_old; + u8 get_link_info; }; struct ixgbe_eeprom_info { @@ -3575,6 +3621,7 @@ struct ixgbe_mac_info { u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; bool set_lben; + u32 max_link_up_time; u8 led_link_act; }; @@ -3599,6 +3646,10 @@ struct ixgbe_phy_info { bool reset_if_overtemp; bool qsfp_shared_i2c_bus; u32 nw_mng_if_sel; + u64 phy_type_low; + u64 phy_type_high; + u16 curr_user_speed_req; + struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg; }; struct ixgbe_mbx_stats { @@ -3643,6 +3694,19 @@ struct ixgbe_hw { bool allow_unsupported_sfp; bool wol_enabled; bool need_crosstalk_fix; + u8 api_branch; + u8 api_maj_ver; + u8 api_min_ver; + u8 api_patch; + u8 fw_branch; + u8 fw_maj_ver; + u8 fw_min_ver; + u8 fw_patch; + u32 fw_build; + struct ixgbe_aci_info aci; + struct ixgbe_flash_info flash; + 
struct ixgbe_hw_dev_caps dev_caps; + struct ixgbe_hw_func_caps func_caps; }; struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h new file mode 100644 index 000000000000..8d06ade3c7cd --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h @@ -0,0 +1,1074 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_TYPE_E610_H_ +#define _IXGBE_TYPE_E610_H_ + +#define BYTES_PER_DWORD 4 + +/* General E610 defines */ +#define IXGBE_MAX_VSI 768 + +/* Checksum and Shadow RAM pointers */ +#define E610_SR_SW_CHECKSUM_WORD 0x3F + +/* Shadow RAM related */ +#define IXGBE_SR_WORDS_IN_1KB 512 + +/* Firmware Status Register (GL_FWSTS) */ +#define GL_FWSTS 0x00083048 /* Reset Source: POR */ +#define GL_FWSTS_EP_PF0 BIT(24) +#define GL_FWSTS_EP_PF1 BIT(25) + +/* Global NVM General Status Register */ +#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */ +#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5) + +/* Flash Access Register */ +#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */ +#define IXGBE_GLNVM_FLA_LOCKED_S 6 +#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6) + +/* Admin Command Interface (ACI) registers */ +#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4)) +#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4)) +#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4)) +#define IXGBE_PF_HICR 0x00082048 + +#define IXGBE_PF_HICR_EN BIT(0) +#define IXGBE_PF_HICR_C BIT(1) +#define IXGBE_PF_HICR_SV BIT(2) +#define IXGBE_PF_HICR_EV BIT(3) + +#define IXGBE_ACI_DESC_SIZE 32 +#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD) + +#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */ +#define IXGBE_ACI_SEND_DELAY_TIME_MS 10 +#define IXGBE_ACI_SEND_MAX_EXECUTE 3 +#define IXGBE_ACI_SEND_TIMEOUT_MS \ + (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS) +/* [ms] timeout of waiting for sync response */ +#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000 +/* [ms] timeout of waiting for async response */ +#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000 +/* [ms] timeout of waiting for resource release */ +#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000 + +/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ +#define IXGBE_ACI_LG_BUF 512 + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */ +#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */ +#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */ +#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */ +#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */ +#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */ +#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */ +#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */ +#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */ +#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */ +#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */ + +/* Admin Command Interface (ACI) error codes */ +enum ixgbe_aci_err { + IXGBE_ACI_RC_OK = 0, /* Success */ + IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */ + IXGBE_ACI_RC_ENOENT = 2, /* No such element */ + IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */ + IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */ + IXGBE_ACI_RC_EIO = 5, /* I/O error */ + IXGBE_ACI_RC_ENXIO = 6, /* No such resource */ + IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */ + IXGBE_ACI_RC_EAGAIN = 8, /* Try again */ + IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */ + 
IXGBE_ACI_RC_EACCES = 10, /* Permission denied */ + IXGBE_ACI_RC_EFAULT = 11, /* Bad address */ + IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */ + IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */ + IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */ + IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */ + IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */ + IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */ + IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */ + IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */ + IXGBE_ACI_RC_EFBIG = 22, /* File too big */ + IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */ + IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */ + IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */ + IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */ + IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */ + IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ + IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */ +}; + +/* Admin Command Interface (ACI) opcodes */ +enum ixgbe_aci_opc { + ixgbe_aci_opc_get_ver = 0x0001, + ixgbe_aci_opc_driver_ver = 0x0002, + ixgbe_aci_opc_get_exp_err = 0x0005, + + /* resource ownership */ + ixgbe_aci_opc_req_res = 0x0008, + ixgbe_aci_opc_release_res = 0x0009, + + /* device/function capabilities */ + ixgbe_aci_opc_list_func_caps = 0x000A, + ixgbe_aci_opc_list_dev_caps = 0x000B, + + /* safe disable of RXEN */ + ixgbe_aci_opc_disable_rxen = 0x000C, + + /* FW events */ + ixgbe_aci_opc_get_fw_event = 0x0014, + + /* PHY commands */ + ixgbe_aci_opc_get_phy_caps = 0x0600, + ixgbe_aci_opc_set_phy_cfg = 0x0601, + ixgbe_aci_opc_restart_an = 0x0605, + ixgbe_aci_opc_get_link_status = 0x0607, + ixgbe_aci_opc_set_event_mask = 0x0613, + ixgbe_aci_opc_get_link_topo = 0x06E0, + ixgbe_aci_opc_get_link_topo_pin = 0x06E1, + ixgbe_aci_opc_read_i2c = 0x06E2, + ixgbe_aci_opc_write_i2c = 0x06E3, + ixgbe_aci_opc_read_mdio = 0x06E4, + ixgbe_aci_opc_write_mdio = 0x06E5, + ixgbe_aci_opc_set_gpio_by_func = 0x06E6, + ixgbe_aci_opc_get_gpio_by_func = 0x06E7, + ixgbe_aci_opc_set_gpio = 0x06EC, + ixgbe_aci_opc_get_gpio = 0x06ED, + ixgbe_aci_opc_sff_eeprom = 0x06EE, + ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2, + ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3, + + /* NVM commands */ + ixgbe_aci_opc_nvm_read = 0x0701, + ixgbe_aci_opc_nvm_erase = 0x0702, + ixgbe_aci_opc_nvm_write = 0x0703, + ixgbe_aci_opc_nvm_cfg_read = 0x0704, + ixgbe_aci_opc_nvm_cfg_write = 0x0705, + ixgbe_aci_opc_nvm_checksum = 0x0706, + ixgbe_aci_opc_nvm_write_activate = 0x0707, + ixgbe_aci_opc_nvm_sr_dump = 0x0707, + ixgbe_aci_opc_nvm_save_factory_settings = 0x0708, + ixgbe_aci_opc_nvm_update_empr = 0x0709, + ixgbe_aci_opc_nvm_pkg_data = 0x070A, + ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B, + + /* Alternate Structure Commands */ + ixgbe_aci_opc_write_alt_direct = 0x0900, + ixgbe_aci_opc_write_alt_indirect = 0x0901, + ixgbe_aci_opc_read_alt_direct = 0x0902, + ixgbe_aci_opc_read_alt_indirect = 0x0903, + ixgbe_aci_opc_done_alt_write = 0x0904, + ixgbe_aci_opc_clear_port_alt_write = 0x0906, + + /* debug commands */ + ixgbe_aci_opc_debug_dump_internals = 0xFF08, + + /* SystemDiagnostic commands */ + ixgbe_aci_opc_set_health_status_config = 0xFF20, + ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21, + ixgbe_aci_opc_get_health_status = 0xFF22, + ixgbe_aci_opc_clear_health_status = 
0xFF23, +}; + +/* Get version (direct 0x0001) */ +struct ixgbe_aci_cmd_get_ver { + __le32 rom_ver; + __le32 fw_build; + u8 fw_branch; + u8 fw_major; + u8 fw_minor; + u8 fw_patch; + u8 api_branch; + u8 api_major; + u8 api_minor; + u8 api_patch; +}; + +#define IXGBE_DRV_VER_STR_LEN_E610 32 + +/* Send driver version (indirect 0x0002) */ +struct ixgbe_aci_cmd_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Expanded Error Code (0x0005, direct) */ +struct ixgbe_aci_cmd_get_exp_err { + __le32 reason; +#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF + __le32 identifier; + u8 rsvd[8]; +}; + +/* FW update timeout definitions are in milliseconds */ +#define IXGBE_NVM_TIMEOUT 180000 + +enum ixgbe_aci_res_access_type { + IXGBE_RES_READ = 1, + IXGBE_RES_WRITE +}; + +enum ixgbe_aci_res_ids { + IXGBE_NVM_RES_ID = 1, + IXGBE_SPD_RES_ID, + IXGBE_CHANGE_LOCK_RES_ID, + IXGBE_GLOBAL_CFG_LOCK_RES_ID +}; + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +struct ixgbe_aci_cmd_req_res { + __le16 res_id; + __le16 access_type; + + /* Upon successful completion, FW writes this value and driver is + * expected to release resource before timeout. This value is provided + * in milliseconds. + */ + __le32 timeout; +#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 +#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 +#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 +#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 + /* For SDP: pin ID of the SDP */ + __le32 res_number; + __le16 status; +#define IXGBE_ACI_RES_GLBL_SUCCESS 0 +#define IXGBE_ACI_RES_GLBL_IN_PROG 1 +#define IXGBE_ACI_RES_GLBL_DONE 2 + u8 reserved[2]; +}; + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct ixgbe_aci_cmd_list_caps { + u8 cmd_flags; + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +/* Device/Function buffer entry, repeated per reported capability */ +struct ixgbe_aci_cmd_list_caps_elem { + __le16 cap; +#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005 +#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8 +#define IXGBE_ACI_CAPS_SRIOV 0x0012 +#define IXGBE_ACI_CAPS_VF 0x0013 +#define IXGBE_ACI_CAPS_VMDQ 0x0014 +#define IXGBE_ACI_CAPS_VSI 0x0017 +#define IXGBE_ACI_CAPS_DCB 0x0018 +#define IXGBE_ACI_CAPS_RSS 0x0040 +#define IXGBE_ACI_CAPS_RXQS 0x0041 +#define IXGBE_ACI_CAPS_TXQS 0x0042 +#define IXGBE_ACI_CAPS_MSIX 0x0043 +#define IXGBE_ACI_CAPS_FD 0x0045 +#define IXGBE_ACI_CAPS_1588 0x0046 +#define IXGBE_ACI_CAPS_MAX_MTU 0x0047 +#define IXGBE_ACI_CAPS_NVM_VER 0x0048 +#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049 +#define IXGBE_ACI_CAPS_OROM_VER 0x004A +#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B +#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D +#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070 +#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072 +#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 +#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083 +#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084 + u8 major_ver; + u8 minor_ver; + /* Number of resources described by this capability */ + __le32 number; + /* Only meaningful for some types of resources */ + __le32 logical_id; + /* Only meaningful for some types of 
resources */ + __le32 phys_id; + __le64 rsvd1; + __le64 rsvd2; +}; + +/* Disable RXEN (direct 0x000C) */ +struct ixgbe_aci_cmd_disable_rxen { + u8 lport_num; + u8 reserved[15]; +}; + +/* Get PHY capabilities (indirect 0x0600) */ +struct ixgbe_aci_cmd_get_phy_caps { + u8 lport_num; + u8 reserved; + __le16 param0; + /* 18.0 - Report qualified modules */ +#define IXGBE_ACI_GET_PHY_RQM BIT(0) + /* 18.1 - 18.3 : Report mode + * 000b - Report topology capabilities, without media + * 001b - Report topology capabilities, with media + * 010b - Report Active configuration + * 011b - Report PHY Type and FEC mode capabilities + * 100b - Report Default capabilities + */ +#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1) +#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0 +#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1) +#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2) +#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3) + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is #define of PHY type (Extended): + * The first set of defines is for phy_type_low. + */ +#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0) +#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1) +#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2) +#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3) +#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4) +#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5) +#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6) +#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7) +#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8) +#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9) +#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10) +#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11) +#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13) +#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14) +#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15) +#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17) +#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18) +#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21) +#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22) +#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23) +#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26) +#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27) +#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28) +#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29) +#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29 +/* The second set of defines is for phy_type_high. 
*/ +#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1) +#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2) +#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56) +#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57) +#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58) +#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59) +#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60) +#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61) +#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61 + +struct ixgbe_aci_cmd_get_phy_caps_data { + __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */ + u8 caps; +#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0) +#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1) +#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2) +#define IXGBE_ACI_PHY_EN_LINK BIT(3) +#define IXGBE_ACI_PHY_AN_MODE BIT(4) +#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5) +#define IXGBE_ACI_PHY_EN_LESM BIT(6) +#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7) +#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0) + u8 low_power_ctrl_an; +#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2) +#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3) + __le16 eee_cap; +#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0) +#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1) +#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2) +#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3) +#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4) +#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5) +#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11) + __le16 eeer_value; + u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ + u8 phy_fw_ver[8]; + u8 link_fec_options; +#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) +#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) +#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2) +#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3) +#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4) +#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) +#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) +#define IXGBE_ACI_PHY_FEC_MASK 0xdf + u8 module_compliance_enforcement; +#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0) + u8 extended_compliance_code; +#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3 + u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]; +#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 +#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 +#define IXGBE_ACI_MOD_TYPE_IDENT 1 +#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) +#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) +#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) +#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 +#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 + u8 qualified_module_count; + u8 rsvd2[7]; /* Bytes 47:41 reserved */ +#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16 + struct { + u8 v_oui[3]; + u8 rsvd3; + u8 v_part[16]; + __le32 v_rev; + __le64 rsvd4; + } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX]; +}; + +/* Set PHY capabilities (direct 0x0601) + * NOTE: This command must be followed by setup link and restart auto-neg + */ +struct ixgbe_aci_cmd_set_phy_cfg { + u8 lport_num; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set PHY config command data structure */ +struct ixgbe_aci_cmd_set_phy_cfg_data { + __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */ + __le64 
phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */ + u8 caps; +#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef +#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2) +#define IXGBE_ACI_PHY_ENA_LINK BIT(3) +#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define IXGBE_ACI_PHY_ENA_LESM BIT(6) +#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7) + u8 low_power_ctrl_an; + __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */ + __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */ + u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */ + u8 module_compliance_enforcement; +}; + +/* Restart AN command data structure (direct 0x0605) + * Also used for response, with only the lport_num field present. + */ +struct ixgbe_aci_cmd_restart_an { + u8 lport_num; + u8 reserved; + u8 cmd_flags; +#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1) +#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2) + u8 reserved2[13]; +}; + +/* Get link status (indirect 0x0607), also used for Link Status Event */ +struct ixgbe_aci_cmd_get_link_status { + u8 lport_num; + u8 reserved; + __le16 cmd_flags; +#define IXGBE_ACI_LSE_M GENMASK(1, 0) +#define IXGBE_ACI_LSE_NOP 0x0 +#define IXGBE_ACI_LSE_DIS 0x2 +#define IXGBE_ACI_LSE_ENA 0x3 + /* only response uses this flag */ +#define IXGBE_ACI_LSE_IS_ENABLED 0x1 + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get link status response data structure, also used for Link Status Event */ +struct ixgbe_aci_cmd_get_link_status_data { + u8 topo_media_conflict; +#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0) +#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1) +#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2) +#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4) +#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5) +#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) +#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7) + u8 link_cfg_err; +#define IXGBE_ACI_LINK_CFG_ERR BIT(0) +#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1) +#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2) +#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3) +#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4) +#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5) +#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) +#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7) + u8 link_info; +#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */ +#define IXGBE_ACI_LINK_FAULT BIT(1) +#define IXGBE_ACI_LINK_FAULT_TX BIT(2) +#define IXGBE_ACI_LINK_FAULT_RX BIT(3) +#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4) +#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */ +#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6) +#define IXGBE_ACI_SIGNAL_DETECT BIT(7) + u8 an_info; +#define IXGBE_ACI_AN_COMPLETED BIT(0) +#define IXGBE_ACI_LP_AN_ABILITY BIT(1) +#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */ +#define IXGBE_ACI_FEC_EN BIT(3) +#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */ +#define IXGBE_ACI_LINK_PAUSE_TX BIT(5) +#define IXGBE_ACI_LINK_PAUSE_RX BIT(6) +#define IXGBE_ACI_QUALIFIED_MODULE BIT(7) + u8 ext_info; +#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0) +#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ + /* Port Tx Suspended */ +#define IXGBE_ACI_LINK_TX_ACTIVE 0 +#define IXGBE_ACI_LINK_TX_DRAINED 1 +#define IXGBE_ACI_LINK_TX_FLUSHED 3 + u8 lb_status; +#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0) +#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1) +#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2) + __le16 
max_frame_size; + u8 cfg; +#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0) +#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1) +#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2) +#define IXGBE_ACI_FEC_MASK GENMASK(2, 0) + /* Pacing Config */ +#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3) +#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7) +#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0 +#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M + /* External Device Power Ability */ + u8 power_desc; +#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0) +#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0 +#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2 +#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0) +#define IXGBE_ACI_LINK_SPEED_10MB BIT(0) +#define IXGBE_ACI_LINK_SPEED_100MB BIT(1) +#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2) +#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3) +#define IXGBE_ACI_LINK_SPEED_5GB BIT(4) +#define IXGBE_ACI_LINK_SPEED_10GB BIT(5) +#define IXGBE_ACI_LINK_SPEED_20GB BIT(6) +#define IXGBE_ACI_LINK_SPEED_25GB BIT(7) +#define IXGBE_ACI_LINK_SPEED_40GB BIT(8) +#define IXGBE_ACI_LINK_SPEED_50GB BIT(9) +#define IXGBE_ACI_LINK_SPEED_100GB BIT(10) +#define IXGBE_ACI_LINK_SPEED_200GB BIT(11) +#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15) + __le16 reserved3; + u8 ext_fec_status; +#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */ + u8 reserved4; + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + /* Get link status version 2 link partner data */ + __le64 lp_phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 lp_phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + u8 lp_fec_adv; +#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0) +#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1) +#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2) +#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3) +#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4) +#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5) + u8 lp_fec_req; +#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0) +#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1) +#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2) +#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3) + u8 lp_flowcontrol; +#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0) +#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1) + u8 reserved5[5]; +} __packed; + +/* Set event mask command (direct 0x0613) */ +struct ixgbe_aci_cmd_set_event_mask { + u8 lport_num; + u8 reserved[7]; + __le16 event_mask; +#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1) +#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2) +#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3) +#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4) +#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) +#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6) +#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7) +#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) +#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) +#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10) +#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11) +#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12) + u8 reserved1[6]; +}; + +struct ixgbe_aci_cmd_link_topo_params { + u8 lport_num; + u8 lport_num_valid; +#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0) + u8 node_type_ctx; +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0) +#define 
IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10 +#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4) +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5 +#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6 + u8 index; +}; + +struct ixgbe_aci_cmd_link_topo_addr { + struct ixgbe_aci_cmd_link_topo_params topo_params; + __le16 handle; +/* Used to decode the handle field */ +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 +}; + +/* Get Link Topology Handle (direct, 0x06E0) */ +struct ixgbe_aci_cmd_get_link_topo { + struct ixgbe_aci_cmd_link_topo_addr addr; + u8 node_part_num; +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48 +#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49 + u8 rsvd[9]; +}; + +/* Get Link Topology Pin (direct, 0x06E1) */ +struct ixgbe_aci_cmd_get_link_topo_pin { + struct ixgbe_aci_cmd_link_topo_addr addr; + u8 input_io_params; +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11 +/* 12 repeats intentionally due to two different uses depending on context */ +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13 +#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14 +#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3 +/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */ + u8 output_io_params; +/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */ + u8 output_io_flags; +#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5) +#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6) +#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7) + u8 rsvd[7]; +}; + +/* Read/Write SFF EEPROM command (indirect 0x06EE) */ +struct ixgbe_aci_cmd_sff_eeprom { + u8 lport_num; + u8 lport_num_valid; +#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0) + __le16 i2c_bus_addr; +#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0) +#define 
IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0) +#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10) +#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0 +#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M +#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0 +#define IXGBE_ACI_SFF_UPDATE_PAGE 1 +#define IXGBE_ACI_SFF_UPDATE_BANK 2 +#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3 +#define IXGBE_ACI_SFF_IS_WRITE BIT(15) + __le16 i2c_offset; + u8 module_bank; + u8 module_page; + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Write commands (indirect 0x0703) + * NVM Write Activate commands (direct 0x0707) + * NVM Shadow RAM Dump commands (direct 0x0707) + */ +struct ixgbe_aci_cmd_nvm { +#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF + __le16 offset_low; + u8 offset_high; /* For Write Activate offset_high is used as flags2 */ + u8 cmd_flags; +#define IXGBE_ACI_NVM_LAST_CMD BIT(0) +#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ +#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1) +#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ +#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4) +#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5) +#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6) +#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ +#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7) +#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */ +#define IXGBE_ACI_NVM_POR_FLAG 0 +#define IXGBE_ACI_NVM_PERST_FLAG 1 +#define IXGBE_ACI_NVM_EMPR_FLAG 2 +#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ + /* For Write Activate, several flags are sent as part of a separate + * flags2 field using a separate byte. For simplicity of the software + * interface, we pass the flags as a 16 bit value so these flags are + * all offset by 8 bits + */ +#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */ + __le16 module_typeid; + __le16 length; +#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Module_Type ID, needed offset and read_len for + * struct ixgbe_aci_cmd_nvm. + */ +#define IXGBE_ACI_NVM_START_POINT 0 + +/* NVM Checksum Command (direct, 0x0706) */ +struct ixgbe_aci_cmd_nvm_checksum { + u8 flags; +#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0) +#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1) + u8 rsvd; + __le16 checksum; /* Used only by response */ +#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA + u8 rsvd2[12]; +}; + +/** + * struct ixgbe_aci_desc - Admin Command (AC) descriptor + * @flags: IXGBE_ACI_FLAG_* flags + * @opcode: Admin command opcode + * @datalen: length in bytes of indirect/external data buffer + * @retval: return value from firmware + * @cookie_high: opaque data high-half + * @cookie_low: opaque data low-half + * @params: command-specific parameters + * + * Descriptor format for commands the driver posts via the + * Admin Command Interface (ACI). + * The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate + * result of the command are written to the Admin Command Interface (ACI) using + * the same descriptor format. Descriptors are in little-endian notation with + * 32-bit words. 
+ */ +struct ixgbe_aci_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + u8 raw[16]; + struct ixgbe_aci_cmd_get_ver get_ver; + struct ixgbe_aci_cmd_driver_ver driver_ver; + struct ixgbe_aci_cmd_get_exp_err exp_err; + struct ixgbe_aci_cmd_req_res res_owner; + struct ixgbe_aci_cmd_list_caps get_cap; + struct ixgbe_aci_cmd_disable_rxen disable_rxen; + struct ixgbe_aci_cmd_get_phy_caps get_phy; + struct ixgbe_aci_cmd_set_phy_cfg set_phy; + struct ixgbe_aci_cmd_restart_an restart_an; + struct ixgbe_aci_cmd_get_link_status get_link_status; + struct ixgbe_aci_cmd_set_event_mask set_event_mask; + struct ixgbe_aci_cmd_get_link_topo get_link_topo; + struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin; + struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param; + struct ixgbe_aci_cmd_nvm nvm; + struct ixgbe_aci_cmd_nvm_checksum nvm_checksum; + } params; +}; + +/* E610-specific adapter context structures */ + +struct ixgbe_link_status { + /* Refer to ixgbe_aci_phy_type for bits definition */ + u64 phy_type_low; + u64 phy_type_high; + u16 max_frame_size; + u16 link_speed; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] + * of ixgbe_aci_get_phy_caps structure + */ + u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]; +}; + +/* Common HW capabilities for SW use */ +struct ixgbe_hw_caps { + /* Write CSR protection */ + u64 wr_csr_prot; + u32 switching_mode; + /* switching mode supported - EVB switching (including cloud) */ +#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0 + + /* Manageability mode & supported protocols over MCTP */ + u32 mgmt_mode; +#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0) +#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4) +#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8) + + u32 mgmt_protocols_mctp; +#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0) +#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1) +#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2) +#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3) + + u32 os2bmc; + u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; + + /* RSS related capabilities */ + u32 rss_table_size; /* 512 for PFs and 64 for VFs */ + u32 rss_table_entry_width; /* RSS Entry width in bits */ + + /* Tx/Rx queues */ + u32 num_rxq; /* Number/Total Rx queues */ + u32 rxq_first_id; /* First queue ID for Rx queues */ + u32 num_txq; /* Number/Total Tx queues */ + u32 txq_first_id; /* First queue ID for Tx queues */ + + /* MSI-X vectors */ + u32 num_msix_vectors; + u32 msix_vector_first_id; + + /* Max MTU for function or device */ + u32 max_mtu; + + /* WOL related */ + u32 num_wol_proxy_fltr; + u32 wol_proxy_vsi_seid; + + /* LED/SDP pin count */ + u32 led_pin_num; + u32 sdp_pin_num; + + /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */ +#define IXGBE_MAX_SUPPORTED_GPIO_LED 12 +#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8 + u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED]; + u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP]; + /* SR-IOV virtualization */ + u8 sr_iov_1_1; /* SR-IOV enabled */ + /* VMDQ */ + u8 vmdq; /* VMDQ supported */ + + /* EVB capabilities */ + u8 evb_802_1_qbg; /* Edge Virtual Bridging */ + u8 evb_802_1_qbh; /* Bridge Port Extension */ + + u8 dcb; + u8 iscsi; + u8 ieee_1588; + u8 mgmt_cem; + + /* WoL and APM support */ +#define IXGBE_WOL_SUPPORT_M BIT(0) +#define 
IXGBE_ACPI_PROG_MTHD_M BIT(1) +#define IXGBE_PROXY_SUPPORT_M BIT(2) + u8 apm_wol_support; + u8 acpi_prog_mthd; + u8 proxy_support; + bool nvm_update_pending_nvm; + bool nvm_update_pending_orom; + bool nvm_update_pending_netlist; +#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0) +#define IXGBE_NVM_PENDING_OROM BIT(1) +#define IXGBE_NVM_PENDING_NETLIST BIT(2) + bool sec_rev_disabled; + bool update_disabled; + bool nvm_unified_update; + bool netlist_auth; +#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0) +#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1) +#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) +#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5) + bool no_drop_policy_support; + /* PCIe reset avoidance */ + bool pcie_reset_avoidance; /* false: not supported, true: supported */ + /* Post update reset restriction */ + bool reset_restrict_support; /* false: not supported, true: supported */ + + /* External topology device images within the NVM */ +#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4 + u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; + u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; + u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8 +#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8) + bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0) + bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT]; +#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1) +} __packed; + +/* Function specific capabilities */ +struct ixgbe_hw_func_caps { + u32 num_allocd_vfs; /* Number of allocated VFs */ + u32 vf_base_id; /* Logical ID of the first VF */ + u32 guar_num_vsi; + struct ixgbe_hw_caps common_cap; + bool no_drop_policy_ena; +}; + +/* Device wide capabilities */ +struct ixgbe_hw_dev_caps { + struct ixgbe_hw_caps common_cap; + u32 num_vfs_exposed; /* Total number of VFs exposed */ + u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ + u32 num_flow_director_fltr; /* Number of FD filters available */ + u32 num_funcs; +}; + +/* ACI event information */ +struct ixgbe_aci_event { + struct ixgbe_aci_desc desc; + u8 *msg_buf; + u16 msg_len; + u16 buf_len; +}; + +struct ixgbe_aci_info { + struct mutex lock; /* admin command interface lock */ + enum ixgbe_aci_err last_status; /* last status of sent admin command */ +}; + +/* Option ROM version information */ +struct ixgbe_orom_info { + u8 major; /* Major version of OROM */ + u8 patch; /* Patch version of OROM */ + u16 build; /* Build version of OROM */ + u32 srev; /* Security revision */ +}; + +/* NVM version information */ +struct ixgbe_nvm_info { + u32 eetrack; + u32 srev; + u8 major; + u8 minor; +} __packed; + +/* netlist version information */ +struct ixgbe_netlist_info { + u32 major; /* major high/low */ + u32 minor; /* minor high/low */ + u32 type; /* type high/low */ + u32 rev; /* revision high/low */ + u32 hash; /* SHA-1 hash word */ + u16 cust_ver; /* customer version */ +} __packed; + +/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules + * of the flash image. 
+ */ +enum ixgbe_flash_bank { + IXGBE_INVALID_FLASH_BANK, + IXGBE_1ST_FLASH_BANK, + IXGBE_2ND_FLASH_BANK, +}; + +/* information for accessing NVM, OROM, and Netlist flash banks */ +struct ixgbe_bank_info { + u32 nvm_ptr; /* Pointer to 1st NVM bank */ + u32 nvm_size; /* Size of NVM bank */ + u32 orom_ptr; /* Pointer to 1st OROM bank */ + u32 orom_size; /* Size of OROM bank */ + u32 netlist_ptr; /* Ptr to 1st Netlist bank */ + u32 netlist_size; /* Size of Netlist bank */ + enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */ + enum ixgbe_flash_bank orom_bank; /* Active OROM bank */ + enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */ +}; + +/* Flash Chip Information */ +struct ixgbe_flash_info { + struct ixgbe_orom_info orom; /* Option ROM version info */ + u32 flash_size; /* Available flash size in bytes */ + struct ixgbe_nvm_info nvm; /* NVM version information */ + struct ixgbe_netlist_info netlist; /* Netlist version info */ + struct ixgbe_bank_info banks; /* Flash Bank information */ + u16 sr_words; /* Shadow RAM size in words */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ +}; + +#endif /* _IXGBE_TYPE_E610_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 81e1df83f136..1fc821fb351a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. - **/ + * + * Return: 0 on success or negative value on failure + */ int ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { u32 swfw_mask = hw->phy.phy_semaphore_mask; @@ -133,10 +135,14 @@ mac_reset_top: hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; hw->mac.ops.init_rx_addrs(hw); + /* The following is not supported by E610. */ + if (hw->mac.type == ixgbe_mac_e610) + return status; + /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - /* Add the SAN MAC address to the RAR only if it's a valid address */ + /* Add the SAN MAC address to RAR if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index b69a680d3ab5..6ed360c5b605 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,5 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ #include "ixgbe_type.h" @@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); + +#endif /* _IXGBE_X540_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index d9a8cf018d3b..277ceaf8a793 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "ixgbe_x540.h" +#include "ixgbe_x550.h" #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_mbx.h" @@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * semaphore, -EIO when command fails or -ENIVAL when incorrect * params passed. **/ -static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, - const char *driver_ver) +int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) { struct ixgbe_hic_drv_info2 fw_cmd; int ret_val; @@ -3505,14 +3506,14 @@ mac_reset_top: return status; } -/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype +/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype * anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ -static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf) +void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw, + bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; @@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } -/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning +/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning * @hw: pointer to hardware structure * @enable: enable or disable source address pruning * @pool: Rx pool to set source address pruning for **/ -static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - bool enable, - unsigned int pool) +void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) { u64 pfflp; @@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ .set_source_address_pruning = \ - &ixgbe_set_source_address_pruning_X550, \ + &ixgbe_set_source_address_pruning_x550, \ .set_ethertype_anti_spoofing = \ - &ixgbe_set_ethertype_anti_spoofing_X550, \ + &ixgbe_set_ethertype_anti_spoofing_x550, \ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ @@ -4047,7 +4048,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; -static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { +const u32 
ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_a) }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h new file mode 100644 index 000000000000..3e4092f8da3e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT]; + +int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver); +void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw, + bool enable, int vf); + +#endif /* _IXGBE_X550_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 5f08779c0e4e..a9bc96f6399d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBEVF_DEFINES_H_ #define _IXGBEVF_DEFINES_H_ @@ -16,6 +16,9 @@ #define IXGBE_DEV_ID_X550_VF_HV 0x1564 #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 +#define IXGBE_DEV_ID_E610_VF 0x57AD +#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF + #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 #define IXGBE_VF_MAX_RX_QUEUES 8 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 130cb868774c..4384e892f967 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ @@ -418,6 +418,8 @@ enum ixgbevf_boards { board_X550EM_x_vf, board_X550EM_x_vf_hv, board_x550em_a_vf, + board_e610_vf, + board_e610_vf_hv, }; enum ixgbevf_xcast_modes { @@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy; extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; +extern const struct ixgbevf_info ixgbevf_e610_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; -extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; +extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info; /* needed by ethtool.c */ extern const char ixgbevf_driver_name[]; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 149911e3002a..6442f115a262 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code @@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2018 Intel Corporation."; + "Copyright (c) 2009 - 2024 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, + [board_e610_vf] = &ixgbevf_e610_vf_info, + [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, + {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID, + IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf}, /* required last entry */ {0, } }; @@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - /* XDP packets use error pointer so abort at this point */ - if (IS_ERR(skb)) - return true; - /* verify that the packet does not have any known errors */ if (unlikely(ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { @@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_TX; } -static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring, - struct xdp_buff *xdp) +static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring, + struct xdp_buff *xdp) { int result = IXGBEVF_XDP_PASS; struct ixgbevf_ring *xdp_ring; @@ -1080,7 +1081,7 @@ out_failure: break; } xdp_out: - return ERR_PTR(-result); + return result; } static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, @@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb = rx_ring->skb; bool xdp_xmit = false; struct xdp_buff xdp; + int xdp_res = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); #endif - skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); + xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp); } - if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { + if (xdp_res) { + if (xdp_res == IXGBEVF_XDP_TX) { xdp_xmit = true; ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, } /* exit if we failed to retrieve a buffer */ - if (!skb) { + if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; @@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, continue; /* verify the packet layout is correct */ - if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } @@ -4693,6 +4695,9 @@ static int 
ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540_vf: dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); break; + case ixgbe_mac_e610_vf: + dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n"); + break; case ixgbe_mac_82599_vf: default: dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index a55dd978f7ca..24d0237e7a99 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = { .check_for_ack = ixgbevf_check_for_ack_vf, .check_for_rst = ixgbevf_check_for_rst_vf, }; - -/* Mailbox operations when running on Hyper-V. - * On Hyper-V, PF/VF communication is not through the - * hardware mailbox; this communication is through - * a software mediated path. - * Most mail box operations are noop while running on - * Hyper-V. - */ -const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, -}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 1641d00d8ed3..da7a72ecce7a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" @@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { .mac = ixgbe_mac_x550em_a_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_e610_vf_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_e610_vf_hv_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index b4eef5b6c172..2d791bc26ae4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #ifndef __IXGBE_VF_H__ #define __IXGBE_VF_H__ @@ -54,6 +54,8 @@ enum ixgbe_mac_type { ixgbe_mac_X550_vf, ixgbe_mac_X550EM_x_vf, ixgbe_mac_x550em_a_vf, + ixgbe_mac_e610, + ixgbe_mac_e610_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 660dff5426e7..83ce3bfefa5c 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -90,7 +90,6 @@ struct ltq_etop_priv { struct net_device *netdev; struct platform_device *pdev; struct ltq_eth_data *pldata; - struct resource *res; struct mii_bus *mii_bus; @@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev) { struct net_device *dev; struct ltq_etop_priv *priv; - struct resource *res; int err; int i; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get etop resource\n"); - err = -ENOENT; - goto err_out; - } - - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request etop resource\n"); - err = -EBUSY; - goto err_out; - } - - ltq_etop_membase = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!ltq_etop_membase) { + ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ltq_etop_membase)) { dev_err(&pdev->dev, "failed to remap etop engine %d\n", pdev->id); - err = -ENOMEM; + err = PTR_ERR(ltq_etop_membase); goto err_out; } @@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev) dev->netdev_ops = <q_eth_netdev_ops; dev->ethtool_ops = <q_etop_ethtool_ops; priv = netdev_priv(dev); - priv->res = res; priv->pdev = pdev; priv->pldata = dev_get_platdata(&pdev->dev); priv->netdev = dev; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a06048719e84..67a6ff07c83d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3]; static void mv643xx_eth_shared_of_remove(void) { + struct mv643xx_eth_platform_data *pd; int n; for (n = 0; n < 3; n++) { + if (!port_platdev[n]) + continue; + pd = dev_get_platdata(&port_platdev[n]->dev); + if (pd) + of_node_put(pd->phy_node); platform_device_del(port_platdev[n]); port_platdev[n] = NULL; } @@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, } ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); - if (!ppdev) - return -ENOMEM; + if (!ppdev) { + ret = -ENOMEM; + goto put_err; + } ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ppdev->dev.of_node = pnp; @@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, port_err: platform_device_put(ppdev); +put_err: + of_node_put(ppd.phy_node); return ret; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1fb285fa0bdb..4fe121b9f94b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -284,8 +284,12 @@ MVNETA_TXQ_BUCKET_REFILL_PERIOD)) #define MVNETA_LPI_CTRL_0 0x2cc0 +#define MVNETA_LPI_CTRL_0_TS (0xff << 8) #define MVNETA_LPI_CTRL_1 0x2cc4 -#define MVNETA_LPI_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1) +#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2) +#define MVNETA_LPI_CTRL_1_TW (0xfff << 4) #define MVNETA_LPI_CTRL_2 0x2cc8 #define MVNETA_LPI_STATUS 0x2ccc @@ -541,10 +545,6 
@@ struct mvneta_port { struct mvneta_bm_pool *pool_short; int bm_win_id; - bool eee_enabled; - bool eee_active; - bool tx_lpi_enabled; - u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -3960,23 +3960,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) return container_of(pcs, struct mvneta_port, phylink_pcs); } -static int mvneta_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* We only support QSGMII, SGMII, 802.3z and RGMII modes. - * When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* QSGMII, SGMII and RGMII can be configured to use inband + * signalling of the AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_QSGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. */ + return LINK_INBAND_DISABLE; } -static void mvneta_pcs_get_state(struct phylink_pcs *pcs, +static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct mvneta_port *pp = mvneta_pcs_to_port(pcs); @@ -4071,7 +4078,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { - .pcs_validate = mvneta_pcs_validate, + .pcs_inband_caps = mvneta_pcs_inband_caps, .pcs_get_state = mvneta_pcs_get_state, .pcs_config = mvneta_pcs_config, .pcs_an_restart = mvneta_pcs_an_restart, @@ -4206,18 +4213,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, return 0; } -static void mvneta_set_eee(struct mvneta_port *pp, bool enable) -{ - u32 lpi_ctl1; - - lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); - if (enable) - lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; - else - lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; - mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); -} - static void mvneta_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -4233,9 +4228,6 @@ static void mvneta_mac_link_down(struct phylink_config *config, val |= MVNETA_GMAC_FORCE_LINK_DOWN; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - - pp->eee_active = false; - mvneta_set_eee(pp, false); } static void mvneta_mac_link_up(struct phylink_config *config, @@ -4284,11 +4276,56 @@ static void mvneta_mac_link_up(struct phylink_config *config, } mvneta_port_up(pp); +} + +static void mvneta_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); + u32 lpi1; + + lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE | + MVNETA_LPI_CTRL_1_REQUEST_FORCE | + MVNETA_LPI_CTRL_1_MANUAL_MODE); + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); +} + +static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct mvneta_port *pp 
= netdev_priv(to_net_dev(config->dev)); + u32 ts, tw, lpi0, lpi1, status; - if (phy && pp->eee_enabled) { - pp->eee_active = phy_init_eee(phy, false) >= 0; - mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); + status = mvreg_read(pp, MVNETA_GMAC_STATUS); + if (status & MVNETA_GMAC_SPEED_1000) { + /* At 1G speeds, the timer resolution are 1us, and + * 802.3 says tw is 16.5us. Round up to 17us. + */ + tw = 17; + ts = timer; + } else { + /* At 100M speeds, the timer resolutions are 10us, and + * 802.3 says tw is 30us. + */ + tw = 3; + ts = DIV_ROUND_UP(timer, 10); } + + if (ts > 255) + ts = 255; + + /* Configure ts */ + lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS); + mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0); + + /* Configure tw and enable LPI generation */ + lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW); + lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE; + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); + + return 0; } static const struct phylink_mac_ops mvneta_phylink_ops = { @@ -4298,6 +4335,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = { .mac_finish = mvneta_mac_finish, .mac_link_down = mvneta_mac_link_down, .mac_link_up = mvneta_mac_link_up, + .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi, + .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi, }; static int mvneta_mdio_probe(struct mvneta_port *pp) @@ -4385,6 +4424,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) if (pp->neta_armada3700) return 0; + netdev_lock(port->napi.dev); spin_lock(&pp->lock); /* * Configuring the driver for a new CPU while the driver is @@ -4392,6 +4432,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) */ if (pp->is_stopped) { spin_unlock(&pp->lock); + netdev_unlock(port->napi.dev); return 0; } netif_tx_stop_all_queues(pp->dev); @@ -4411,7 +4452,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) /* Mask all ethernet port interrupts */ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - napi_enable(&port->napi); + napi_enable_locked(&port->napi); /* * Enable per-CPU interrupts on the CPU that is @@ -4432,6 +4473,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); spin_unlock(&pp->lock); + netdev_unlock(port->napi.dev); + return 0; } @@ -5099,14 +5142,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); - u32 lpi_ctl0; - - lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); - - eee->eee_enabled = pp->eee_enabled; - eee->eee_active = pp->eee_active; - eee->tx_lpi_enabled = pp->tx_lpi_enabled; - eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; return phylink_ethtool_get_eee(pp->phylink, eee); } @@ -5115,7 +5150,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); - u32 lpi_ctl0; /* The Armada 37x documents do not give limits for this other than * it being an 8-bit register. 
@@ -5123,16 +5157,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) return -EINVAL; - lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); - lpi_ctl0 &= ~(0xff << 8); - lpi_ctl0 |= eee->tx_lpi_timer << 8; - mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); - - pp->eee_enabled = eee->eee_enabled; - pp->tx_lpi_enabled = eee->tx_lpi_enabled; - - mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); - return phylink_ethtool_set_eee(pp->phylink, eee); } @@ -5446,6 +5470,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; + /* Ensure LPI is disabled */ + mvneta_mac_disable_tx_lpi(&pp->phylink_config); + return 0; } @@ -5537,6 +5564,13 @@ static int mvneta_probe(struct platform_device *pdev) pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */ + __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces); + pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD; + pp->phylink_config.lpi_timer_default = 250; + pp->phylink_config.eee_enabled_default = true; + phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.supported_interfaces); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 9e02e4367bec..44fe9b68d1c2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -481,6 +481,11 @@ #define MVPP22_GMAC_INT_SUM_MASK 0xa4 #define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) #define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2) +#define MVPP2_GMAC_LPI_CTRL0 0xc0 +#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8) +#define MVPP2_GMAC_LPI_CTRL1 0xc4 +#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0) +#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4) /* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0, * relative to port->base. 
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 571631a30320..dd76c1b7ed3a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5757,6 +5757,28 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); } +static int mvpp2_ethtool_get_eee(struct net_device *dev, + struct ethtool_keee *eee) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_get_eee(port->phylink, eee); +} + +static int mvpp2_ethtool_set_eee(struct net_device *dev, + struct ethtool_keee *eee) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_set_eee(port->phylink, eee); +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -5802,6 +5824,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .create_rxfh_context = mvpp2_create_rxfh_context, .modify_rxfh_context = mvpp2_modify_rxfh_context, .remove_rxfh_context = mvpp2_remove_rxfh_context, + .get_eee = mvpp2_ethtool_get_eee, + .set_eee = mvpp2_ethtool_set_eee, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -6188,6 +6212,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) } static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); @@ -6224,22 +6249,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_config = mvpp2_xlg_pcs_config, }; -static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* SGMII and RGMII can be configured to use inband signalling of the + * AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. 
*/ + return LINK_INBAND_DISABLE; } static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); @@ -6343,7 +6376,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { - .pcs_validate = mvpp2_gmac_pcs_validate, + .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, @@ -6665,6 +6698,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config, mvpp2_port_disable(port); } +static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1, + MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0); +} + +static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + u32 ts, tw, lpi1, status; + + status = readl(port->base + MVPP2_GMAC_STATUS0); + if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) { + /* At 1G speeds, the timer resolution are 1us, and + * 802.3 says tw is 16.5us. Round up to 17us. + */ + tw = 17; + ts = timer; + } else { + /* At 100M speeds, the timer resolutions are 10us, and + * 802.3 says tw is 30us. + */ + tw = 3; + ts = DIV_ROUND_UP(timer, 10); + } + + if (ts > 255) + ts = 255; + + /* Configure ts */ + mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0, + MVPP2_GMAC_LPI_CTRL0_TS_MASK, + FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts)); + + lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1); + + /* Configure tw */ + lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK); + + /* Enable LPI generation */ + writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN, + port->base + MVPP2_GMAC_LPI_CTRL1); + + return 0; +} + static const struct phylink_mac_ops mvpp2_phylink_ops = { .mac_select_pcs = mvpp2_select_pcs, .mac_prepare = mvpp2_mac_prepare, @@ -6672,6 +6754,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = { .mac_finish = mvpp2_mac_finish, .mac_link_up = mvpp2_mac_link_up, .mac_link_down = mvpp2_mac_link_down, + .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi, + .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi, }; /* Work-around for ACPI */ @@ -6950,6 +7034,15 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.mac_capabilities = MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.lpi_interfaces); + + port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD; + + /* Setup EEE. Choose 250us idle. */ + port->phylink_config.lpi_timer_default = 250; + port->phylink_config.eee_enabled_default = true; + if (port->priv->global_tx_fc) port->phylink_config.mac_capabilities |= MAC_SYM_PAUSE | MAC_ASYM_PAUSE; @@ -7024,6 +7117,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_port_pcpu; } port->phylink = phylink; + + mvpp2_mac_disable_tx_lpi(&port->phylink_config); } else { dev_warn(&pdev->dev, "Use link irqs for port#%d. 
FW update required\n", port->id); port->phylink = NULL; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c index 4f4d58189118..a88c006ea65b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c @@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev, iface_rx_stats, iface_tx_stats); - for (q = 0; q < oct->num_oqs; q++) { - struct octep_iq *iq = oct->iq[q]; - struct octep_oq *oq = oct->oq[q]; - - tx_packets += iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - tx_busy_errors += iq->stats.tx_busy; - - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; - rx_alloc_errors += oq->stats.alloc_failures; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + tx_busy_errors += oct->stats_iq[q].tx_busy; + + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; + rx_alloc_errors += oct->stats_oq[q].alloc_failures; } i = 0; data[i++] = rx_packets; @@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev, data[i++] = iface_rx_stats->err_pkts; /* Per Tx Queue stats */ - for (q = 0; q < oct->num_iqs; q++) { - struct octep_iq *iq = oct->iq[q]; - - data[i++] = iq->stats.instr_posted; - data[i++] = iq->stats.instr_completed; - data[i++] = iq->stats.bytes_sent; - data[i++] = iq->stats.tx_busy; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + data[i++] = oct->stats_iq[q].instr_posted; + data[i++] = oct->stats_iq[q].instr_completed; + data[i++] = oct->stats_iq[q].bytes_sent; + data[i++] = oct->stats_iq[q].tx_busy; } /* Per Rx Queue stats */ - for (q = 0; q < oct->num_oqs; q++) { - struct octep_oq *oq = oct->oq[q]; - - data[i++] = oq->stats.packets; - data[i++] = oq->stats.bytes; - data[i++] = oq->stats.alloc_failures; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + data[i++] = oct->stats_oq[q].packets; + data[i++] = oct->stats_oq[q].bytes; + data[i++] = oct->stats_oq[q].alloc_failures; } } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 549436efc204..0a679e95196f 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq) if (unlikely(IQ_INSTR_SPACE(iq) > OCTEP_WAKE_QUEUE_THRESHOLD)) { netif_start_subqueue(iq->netdev, iq->q_no); - iq->stats.restart_cnt++; + iq->stats->restart_cnt++; return 0; } @@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, wmb(); /* Ring Doorbell to notify the NIC of new packets */ writel(iq->fill_cnt, iq->doorbell_reg); - iq->stats.instr_posted += iq->fill_cnt; + iq->stats->instr_posted += iq->fill_cnt; iq->fill_cnt = 0; return NETDEV_TX_OK; @@ -991,37 +991,24 @@ dma_map_err: static void octep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { - u64 tx_packets, tx_bytes, rx_packets, rx_bytes; struct octep_device *oct = netdev_priv(netdev); + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; int q; - if (netif_running(netdev)) - octep_ctrl_net_get_if_stats(oct, - OCTEP_CTRL_NET_INVALID_VFID, - &oct->iface_rx_stats, - &oct->iface_tx_stats); - tx_packets = 0; tx_bytes = 0; rx_packets = 0; rx_bytes = 0; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_iq *iq = oct->iq[q]; - struct octep_oq *oq = oct->oq[q]; - - tx_packets += 
iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->rx_packets = rx_packets; stats->rx_bytes = rx_bytes; - stats->multicast = oct->iface_rx_stats.mcast_pkts; - stats->rx_errors = oct->iface_rx_stats.err_pkts; - stats->collisions = oct->iface_tx_stats.xscol; - stats->tx_fifo_errors = oct->iface_tx_stats.undflw; } /** @@ -1137,6 +1124,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features return err; } +static int octep_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi) +{ + struct octep_device *oct = netdev_priv(dev); + + ivi->vf = vf; + ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); + ivi->spoofchk = true; + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + ivi->trusted = false; + + return 0; +} + +static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct octep_device *oct = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(mac)) { + dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac); + return -EADDRNOTAVAIL; + } + + dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac); + ether_addr_copy(oct->vf_info[vf].mac_addr, mac); + oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF; + + err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true); + if (err) + dev_err(&oct->pdev->dev, + "Set VF%d MAC address failed via host control Mbox\n", + vf); + + return err; +} + static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, @@ -1146,6 +1170,8 @@ static const struct net_device_ops octep_netdev_ops = { .ndo_set_mac_address = octep_set_mac, .ndo_change_mtu = octep_change_mtu, .ndo_set_features = octep_set_features, + .ndo_get_vf_config = octep_get_vf_config, + .ndo_set_vf_mac = octep_set_vf_mac }; /** diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index fee59e0e0138..81ac4267811c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -220,6 +220,7 @@ struct octep_iface_link_info { /* The Octeon VF device specific info data structure.*/ struct octep_pfvf_info { u8 mac_addr[ETH_ALEN]; + u32 flags; u32 mbox_version; }; @@ -257,11 +258,17 @@ struct octep_device { /* Pointers to Octeon Tx queues */ struct octep_iq *iq[OCTEP_MAX_IQ]; + /* Per iq stats */ + struct octep_iq_stats stats_iq[OCTEP_MAX_IQ]; + /* Rx queues (OQ: Output Queue) */ u16 num_oqs; /* Pointers to Octeon Rx queues */ struct octep_oq *oq[OCTEP_MAX_OQ]; + /* Per oq stats */ + struct octep_oq_stats stats_oq[OCTEP_MAX_OQ]; + /* Hardware port number of the PCIe interface */ u16 pcie_port; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index e6eb98d70f3c..ebecdd29f3bd 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_err(&oct->pdev->dev, + "VF%d attempted to 
override administrative set MAC address\n", + vf_id); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; + return; + } + err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n", + vf_id); return; } + + ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr); rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; } @@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id); + ether_addr_copy(rsp->s_set_mac.mac_addr, + oct->vf_info[vf_id].mac_addr); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; + return; + } err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n", + vf_id); return; } rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h index 0dc6eead292a..386a095a99bc 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h @@ -8,8 +8,6 @@ #ifndef _OCTEP_PFVF_MBOX_H_ #define _OCTEP_PFVF_MBOX_H_ -/* VF flags */ -#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */ #define OCTEON_SDP_16K_HW_FRS 16380UL #define OCTEON_SDP_64K_HW_FRS 65531UL @@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version { #define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 +/* VF flags */ +/* PF has set VF MAC address */ +#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0) + enum octep_pfvf_mbox_opcode { OCTEP_PFVF_MBOX_CMD_VERSION, OCTEP_PFVF_MBOX_CMD_SET_MTU, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 8af75cb37c3e..82b6b19e76b4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "refill: rx buffer alloc failed\n"); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } @@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) "OQ-%d buffer refill: DMA mapping error!\n", oq->q_no); put_page(page); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } oq->buff_info[refill_idx].page = page; @@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no) oq->netdev = oct->netdev; oq->dev = &oct->pdev->dev; oq->q_no = q_no; + oq->stats = &oct->stats_oq[q_no]; oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); oq->ring_size_mask = oq->max_count - 1; oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); @@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct, if (!skb) { octep_oq_drop_rx(oq, buff_info, &read_idx, &desc_used); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; continue; } skb_reserve(skb, 
data_offset); @@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct, oq->host_read_idx = read_idx; oq->refill_count += desc_used; - oq->stats.packets += pkt; - oq->stats.bytes += rx_bytes; + oq->stats->packets += pkt; + oq->stats->bytes += rx_bytes; return pkt; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h index 3b08e2d560dc..b4696c93d0e6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -186,8 +186,8 @@ struct octep_oq { */ u8 __iomem *pkts_sent_reg; - /* Statistics for this OQ. */ - struct octep_oq_stats stats; + /* Pointer to statistics for this OQ. */ + struct octep_oq_stats *stats; /* Packets pending to be processed */ u32 pkts_pending; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index 06851b78aa28..08ee90013fef 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget) } iq->pkts_processed += compl_pkts; - iq->stats.instr_completed += compl_pkts; - iq->stats.bytes_sent += compl_bytes; - iq->stats.sgentry_sent += compl_sg; + iq->stats->instr_completed += compl_pkts; + iq->stats->bytes_sent += compl_bytes; + iq->stats->sgentry_sent += compl_sg; iq->flush_index = fi; netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); @@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no) iq->netdev = oct->netdev; iq->dev = &oct->pdev->dev; iq->q_no = q_no; + iq->stats = &oct->stats_iq[q_no]; iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); iq->ring_size_mask = iq->max_count - 1; iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h index 875a2c34091f..58fb39dda977 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -170,8 +170,8 @@ struct octep_iq { */ u16 flush_index; - /* Statistics for this input queue. */ - struct octep_iq_stats stats; + /* Pointer to statistics for this input queue. */ + struct octep_iq_stats *stats; /* Pointer to the Virtual Base addr of the input ring. 
*/ struct octep_tx_desc_hw *desc_ring; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c index 7b21439a315f..d60441928ba9 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c @@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev, iface_tx_stats = &oct->iface_tx_stats; iface_rx_stats = &oct->iface_rx_stats; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - struct octep_vf_oq *oq = oct->oq[q]; - - tx_busy_errors += iq->stats.tx_busy; - rx_alloc_errors += oq->stats.alloc_failures; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + tx_busy_errors += oct->stats_iq[q].tx_busy; + rx_alloc_errors += oct->stats_oq[q].alloc_failures; } i = 0; data[i++] = rx_alloc_errors; @@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev, data[i++] = iface_rx_stats->dropped_octets_fifo_full; /* Per Tx Queue stats */ - for (q = 0; q < oct->num_iqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - - data[i++] = iq->stats.instr_posted; - data[i++] = iq->stats.instr_completed; - data[i++] = iq->stats.bytes_sent; - data[i++] = iq->stats.tx_busy; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + data[i++] = oct->stats_iq[q].instr_posted; + data[i++] = oct->stats_iq[q].instr_completed; + data[i++] = oct->stats_iq[q].bytes_sent; + data[i++] = oct->stats_iq[q].tx_busy; } /* Per Rx Queue stats */ for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_oq *oq = oct->oq[q]; - - data[i++] = oq->stats.packets; - data[i++] = oq->stats.bytes; - data[i++] = oq->stats.alloc_failures; + data[i++] = oct->stats_oq[q].packets; + data[i++] = oct->stats_oq[q].bytes; + data[i++] = oct->stats_oq[q].alloc_failures; } } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 7e6771c9cdbb..18c922dd5fc6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq) * caused queues to get re-enabled after * being stopped */ - iq->stats.restart_cnt++; + iq->stats->restart_cnt++; fallthrough; case 1: /* Queue left enabled, since IQ is not yet full*/ return 0; @@ -731,7 +731,7 @@ ring_dbell: /* Flush the hw descriptors before writing to doorbell */ smp_wmb(); writel(iq->fill_cnt, iq->doorbell_reg); - iq->stats.instr_posted += iq->fill_cnt; + iq->stats->instr_posted += iq->fill_cnt; iq->fill_cnt = 0; return NETDEV_TX_OK; } @@ -786,27 +786,16 @@ static void octep_vf_get_stats64(struct net_device *netdev, tx_bytes = 0; rx_packets = 0; rx_bytes = 0; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - struct octep_vf_oq *oq = oct->oq[q]; - - tx_packets += iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->rx_packets = rx_packets; stats->rx_bytes = rx_bytes; - if (!octep_vf_get_if_stats(oct)) { - stats->multicast = oct->iface_rx_stats.mcast_pkts; - stats->rx_errors = 
oct->iface_rx_stats.err_pkts; - stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full + - oct->iface_rx_stats.err_pkts; - stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full; - stats->tx_dropped = oct->iface_tx_stats.dropped; - } } /** diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h index 5769f62545cd..1a352f41f823 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -246,11 +246,17 @@ struct octep_vf_device { /* Pointers to Octeon Tx queues */ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ]; + /* Per iq stats */ + struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ]; + /* Rx queues (OQ: Output Queue) */ u16 num_oqs; /* Pointers to Octeon Rx queues */ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ]; + /* Per oq stats */ + struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ]; + /* Hardware port number of the PCIe interface */ u16 pcie_port; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c index 82821bc28634..d70c8be3cfc4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c @@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "refill: rx buffer alloc failed\n"); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } @@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o "OQ-%d buffer refill: DMA mapping error!\n", oq->q_no); put_page(page); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } oq->buff_info[refill_idx].page = page; @@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no) oq->netdev = oct->netdev; oq->dev = &oct->pdev->dev; oq->q_no = q_no; + oq->stats = &oct->stats_oq[q_no]; oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); oq->ring_size_mask = oq->max_count - 1; oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); @@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, oq->host_read_idx = read_idx; oq->refill_count += desc_used; - oq->stats.packets += pkt; - oq->stats.bytes += rx_bytes; + oq->stats->packets += pkt; + oq->stats->bytes += rx_bytes; return pkt; } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h index fe46838b5200..9e296b7d7e34 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h @@ -187,7 +187,7 @@ struct octep_vf_oq { u8 __iomem *pkts_sent_reg; /* Statistics for this OQ. 
*/ - struct octep_vf_oq_stats stats; + struct octep_vf_oq_stats *stats; /* Packets pending to be processed */ u32 pkts_pending; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c index 47a5c054fdb6..8180e5ce3d7e 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c @@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget) } iq->pkts_processed += compl_pkts; - iq->stats.instr_completed += compl_pkts; - iq->stats.bytes_sent += compl_bytes; - iq->stats.sgentry_sent += compl_sg; + iq->stats->instr_completed += compl_pkts; + iq->stats->bytes_sent += compl_bytes; + iq->stats->sgentry_sent += compl_sg; iq->flush_index = fi; netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts, @@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no) iq->netdev = oct->netdev; iq->dev = &oct->pdev->dev; iq->q_no = q_no; + iq->stats = &oct->stats_iq[q_no]; iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); iq->ring_size_mask = iq->max_count - 1; iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h index f338b975103c..1cede90e3a5f 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h @@ -129,7 +129,7 @@ struct octep_vf_iq { u16 flush_index; /* Statistics for this input queue. */ - struct octep_vf_iq_stats stats; + struct octep_vf_iq_stats *stats; /* Pointer to the Virtual Base addr of the input ring. */ struct octep_vf_tx_desc_hw *desc_ring; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 62c07407eb94..005ca8a056c0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ nix_bandprof_get_hwinfo_rsp) \ +M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \ + nix_bp_cfg_rsp) \ +M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \ + msg_rsp) \ M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ msg_req, nix_inline_ipsec_cfg) \ M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 148144f5b61d..a1f9ec03c2ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -917,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) /* The 'qsize' entry dumps current Aura/Pool context Qsize * and each context's current enable/disable status in a bitmap. 
*/ -static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, +static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused, int blktype) { - void (*print_qsize)(struct seq_file *filp, + void (*print_qsize)(struct seq_file *s, struct rvu_pfvf *pfvf) = NULL; - struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; int blkaddr; - rvu = filp->private; + rvu = s->private; switch (blktype) { case BLKTYPE_NPA: qsize_id = rvu->rvu_dbg.npa_qsize_id; @@ -945,32 +944,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, return -EINVAL; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->file->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? - BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(s->file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); - print_qsize(filp, pfvf); + print_qsize(s, pfvf); return 0; } -static ssize_t rvu_dbg_qsize_write(struct file *filp, +static ssize_t rvu_dbg_qsize_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int blktype) { char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; - struct seq_file *seqfile = filp->private_data; + struct seq_file *seqfile = file->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; - struct dentry *current_dir; int blkaddr; u16 pcifunc; int ret, lf; @@ -996,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, goto qsize_write_done; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 
- BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; @@ -2704,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) &rvu_dbg_nix_ndc_tx_hits_miss_fops); debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_hits_miss_fops); - debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_qsize_fops); + debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu, + blkaddr, &rvu_dbg_nix_qsize_fops); debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_ctx_fops); debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, @@ -2854,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) +static int rvu_dbg_derive_lmacid(struct seq_file *s) { - struct dentry *current_dir; - char *buf; - - current_dir = filp->file->f_path.dentry->d_parent; - buf = strrchr(current_dir->d_name.name, 'c'); - if (!buf) - return -EINVAL; - - return kstrtoint(buf + 1, 10, lmac_id); + return debugfs_get_aux_num(s->file); } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused) { - int lmac_id, err; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_stats(filp, lmac_id); - - return err; + return cgx_print_stats(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); @@ -2933,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) return 0; } -static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused) { - int err, lmac_id; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_dmac_flt(filp, lmac_id); - - return err; + return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); @@ -2980,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); - debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, - cgx, &rvu_dbg_cgx_stat_fops); - debugfs_create_file("mac_filter", 0600, - rvu->rvu_dbg.lmac, cgx, + debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac, + cgx, lmac_id, &rvu_dbg_cgx_stat_fops); + debugfs_create_file_aux_num("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, lmac_id, &rvu_dbg_cgx_dmac_flt_fops); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a5d1e2bddd58..613655fcd34f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) mutex_unlock(&rvu->rsrc_lock); } -int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct msg_rsp *rsp) +static u16 nix_get_channel(u16 chan, bool cpt_link) +{ + /* CPT channel for a given link channel is always + * assumed to be BIT(11) set in link channel. + */ + return cpt_link ? 
chan | BIT(11) : chan; +} + +static int nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp, bool cpt_link) { u16 pcifunc = req->hdr.pcifunc; int blkaddr, pf, type, err; @@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; struct nix_bp *bp; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); @@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, if (is_sdp_pfvf(pcifunc)) type = NIX_INTF_TYPE_SDP; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) @@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, bp = &nix_hw->bp; chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + chan_v = nix_get_channel(chan, cpt_link); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg & ~BIT_ULL(16)); if (type == NIX_INTF_TYPE_LBK) { @@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, true); +} + static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { @@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, return bpid; } -int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct nix_bp_cfg_rsp *rsp) +static int nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp, + bool cpt_link) { int blkaddr, pf, type, chan_id = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u16 chan_base, chan; s16 bpid, bpid_base; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); @@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, type != NIX_INTF_TYPE_SDP) return 0; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return -EINVAL; } - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + chan_v = nix_get_channel(chan, cpt_link); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); cfg &= ~GENMASK_ULL(8, 0); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); @@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, true); +} + static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, u64 format, bool v4, u64 *fidx) { diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index dbc971266865..cb6513ab35e7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -15,5 +15,6 @@ rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c new file mode 100644 index 000000000000..09a5b5268205 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -0,0 +1,1056 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#include <net/xfrm.h> +#include <linux/netdevice.h> +#include <linux/bitfield.h> +#include <crypto/aead.h> +#include <crypto/gcm.h> + +#include "otx2_common.h" +#include "otx2_struct.h" +#include "cn10k_ipsec.h" + +static bool is_dev_support_ipsec_offload(struct pci_dev *pdev) +{ + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev); +} + +static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf) +{ + enum cn10k_cpt_hw_state_e state; + + while (true) { + state = atomic_cmpxchg(&pf->ipsec.cpt_state, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE); + if (state == CN10K_CPT_HW_AVAILABLE) + return true; + if (state == CN10K_CPT_HW_UNAVAILABLE) + return false; + + mdelay(1); + } +} + +static void cn10k_cpt_device_set_available(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE); +} + +static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE); +} + +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf) +{ + struct rsrc_attach *attach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox); + if (!attach) + goto unlock; + + attach->cptlfs = true; + attach->modify = true; + + /* Send attach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf) +{ + struct rsrc_detach *detach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox); + if (!detach) + goto unlock; + + detach->partial = true; + detach->cptlfs = true; + + /* Send detach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf) +{ + struct cpt_lf_alloc_req_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox); + if (!req) + goto unlock; + + /* PF function */ + req->nix_pf_func = pf->pcifunc; + /* Enable SE-IE Engine Group */ + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP; + + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_free(struct otx2_nic *pf) +{ + mutex_lock(&pf->mbox.lock); + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox); + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + +static int cn10k_outb_cptlf_config(struct otx2_nic *pf) +{ + struct cpt_inline_ipsec_cfg_msg *req; + int ret = -ENOMEM; + + 
mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox); + if (!req) + goto unlock; + + req->dir = CPT_INLINE_OUTBOUND; + req->enable = 1; + req->nix_pf_func = pf->pcifunc; + ret = otx2_sync_mbox_msg(&pf->mbox); +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf) +{ + u64 reg_val; + + /* Set Execution Enable of instruction queue */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + reg_val |= BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Set iqueue's enqueuing */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL); + reg_val |= BIT_ULL(0); + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val); +} + +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf) +{ + u32 inflight, grb_cnt, gwb_cnt; + u32 nq_ptr, dq_ptr; + int timeout = 20; + u64 reg_val; + int cnt; + + /* Disable instructions enqueuing */ + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull); + + /* Wait for instruction queue to become empty. + * CPT_LF_INPROG.INFLIGHT count is zero + */ + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + if (!inflight) + break; + + usleep_range(10000, 20000); + if (timeout-- < 0) { + netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n"); + break; + } + } while (1); + + /* Disable executions in the LF's queue, + * the queue should be empty at this point + */ + reg_val &= ~BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Wait for instruction queue to become empty */ + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + if (reg_val & BIT_ULL(31)) + cnt = 0; + else + cnt++; + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR); + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + } while ((cnt < 10) && (nq_ptr != dq_ptr)); + + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val); + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val); + if (inflight == 0 && gwb_cnt < 40 && + (grb_cnt == 0 || grb_cnt == 40)) + cnt++; + else + cnt = 0; + } while (cnt < 10); +} + +/* Allocate memory for CPT outbound Instruction queue. 
+ * Instruction queue memory format is: + * ----------------------------- + * | Instruction Group memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | x 16 Bytes) | + * | | + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR] + * | Flow Control (128 Bytes) | + * | | + * ----------------------------- + * | Instruction Memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | × 40 × 64 bytes) | + * | | + * ----------------------------- + */ +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN + + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN; + + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size, + &iq->real_dma_addr, GFP_KERNEL); + if (!iq->real_vaddr) + return -ENOMEM; + + /* iq->vaddr/dma_addr points to Flow Control location */ + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES; + iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES; + + /* Align pointers */ + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN); + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN); + return 0; +} + +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + if (iq->real_vaddr) + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr, + iq->real_dma_addr); + + iq->real_vaddr = NULL; + iq->vaddr = NULL; +} + +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf) +{ + u64 reg_val; + int ret; + + /* Allocate Memory for CPT IQ */ + ret = cn10k_outb_cptlf_iq_alloc(pf); + if (ret) + return ret; + + /* Disable IQ */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base address */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr); + + /* Set IQ size */ + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 + + CN10K_CPT_EXTRA_SIZE_DIV40); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val); + + return 0; +} + +static int cn10k_outb_cptlf_init(struct otx2_nic *pf) +{ + int ret; + + /* Initialize CPTLF Instruction Queue (IQ) */ + ret = cn10k_outb_cptlf_iq_init(pf); + if (ret) + return ret; + + /* Configure CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_config(pf); + if (ret) + goto iq_clean; + + /* Enable CPTLF IQ */ + cn10k_outb_cptlf_iq_enable(pf); + return 0; +iq_clean: + cn10k_outb_cptlf_iq_free(pf); + return ret; +} + +static int cn10k_outb_cpt_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int ret; + + /* Attach a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_attach(pf); + if (ret) + return ret; + + /* Allocate a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_alloc(pf); + if (ret) + goto detach; + + /* Initialize the CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_init(pf); + if (ret) + goto lf_free; + + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf, + CN10K_CPT_LF_NQX(0)); + + /* Set ipsec offload enabled for this device */ + pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + cn10k_cpt_device_set_available(pf); + return 0; + +lf_free: + cn10k_outb_cptlf_free(pf); +detach: + cn10k_outb_cptlf_detach(pf); + return ret; +} + +static int cn10k_outb_cpt_clean(struct otx2_nic *pf) +{ + int ret; + + if (!cn10k_cpt_device_set_inuse(pf)) { + netdev_err(pf->netdev, "CPT LF device unavailable\n"); + return -ENODEV; + } + + /* Set ipsec offload disabled for this device */ + pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + /* Disable CPTLF Instruction Queue (IQ) */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base 
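/*
 * A standalone walk-through (not driver code) of the sizing used by
 * cn10k_outb_cptlf_iq_alloc() above, with the constants from cn10k_ipsec.h.
 * OTX2_ALIGN is assumed to be 128 bytes; the extra 320 entries are the free
 * slots the header notes the CPT hardware needs.
 */
#include <stdio.h>

#define INST_SIZE	64			/* bytes per CPT_INST_S */
#define INST_QLEN	8200			/* instructions in the queue */
#define SIZE_DIV40	(INST_QLEN / 40)	/* 205, written to Q_SIZE */
#define EXTRA_DIV40	(320 / 40)		/* 8, extra free entries */
#define QLEN_BYTES	(SIZE_DIV40 * 40 * INST_SIZE + 320 * INST_SIZE)
#define GRP_QLEN_BYTES	((SIZE_DIV40 + EXTRA_DIV40) * 16)
#define FC_LEN		128			/* flow-control block */
#define OTX2_ALIGN	128			/* assumed alignment */

int main(void)
{
	unsigned int grp = GRP_QLEN_BYTES, inst = QLEN_BYTES;
	/* group area + flow control + instruction area + alignment slack */
	unsigned int total = inst + FC_LEN + grp + OTX2_ALIGN;

	printf("grp=%u inst=%u total=%u bytes\n", grp, inst, total);
	return 0;
}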
address and size to 0 */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0); + + /* Free CPTLF IQ */ + cn10k_outb_cptlf_iq_free(pf); + + /* Free and detach CPT LF */ + cn10k_outb_cptlf_free(pf); + ret = cn10k_outb_cptlf_detach(pf); + if (ret) + netdev_err(pf->netdev, "Failed to detach CPT LF\n"); + + cn10k_cpt_device_set_unavailable(pf); + return ret; +} + +static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst, + u64 size) +{ + struct otx2_lmt_info *lmt_info; + u64 val = 0, tar_addr = 0; + + lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); + /* FIXME: val[0:10] LMT_ID. + * [12:15] no of LMTST - 1 in the burst. + * [19:63] data size of each LMTST in the burst except first. + */ + val = (lmt_info->lmt_id & 0x7FF); + /* Target address for LMTST flush tells HW how many 128bit + * words are present. + * tar_addr[6:4] size of first LMTST - 1 in units of 128b. + */ + tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4; + dma_wmb(); + memcpy((u64 *)lmt_info->lmt_addr, inst, size); + cn10k_lmt_flush(val, tar_addr); +} + +static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf, + struct cpt_res_s *res) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u64 *completion_ptr = (u64 *)res; + + do { + if (time_after(jiffies, timeout)) { + netdev_err(pf->netdev, "CPT response timeout\n"); + return -EBUSY; + } + } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) == + CN10K_CPT_COMP_E_NOTDONE); + + if (!(res->compcode == CN10K_CPT_COMP_E_GOOD || + res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) { + netdev_err(pf->netdev, "compcode=%x doneint=%x\n", + res->compcode, res->doneint); + netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n", + res->uc_compcode, (u64)res->uc_info, res->esn); + } + return 0; +} + +static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) +{ + dma_addr_t res_iova, dptr_iova, sa_iova; + struct cn10k_tx_sa_s *sa_dptr; + struct cpt_inst_s inst = {}; + struct cpt_res_s *res; + u32 sa_size, off; + u64 *sptr, *dptr; + u64 reg_val; + int ret; + + sa_iova = sa_info->iova; + if (!sa_iova) + return -EINVAL; + + res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s), + &res_iova, GFP_ATOMIC); + if (!res) + return -ENOMEM; + + sa_size = sizeof(struct cn10k_tx_sa_s); + sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC); + if (!sa_dptr) { + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, + res_iova); + return -ENOMEM; + } + + sptr = (__force u64 *)sa_info->base; + dptr = (__force u64 *)sa_dptr; + for (off = 0; off < (sa_size / 8); off++) + *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off)); + + res->compcode = CN10K_CPT_COMP_E_NOTDONE; + inst.res_addr = res_iova; + inst.dptr = (u64)dptr_iova; + inst.param2 = sa_size >> 3; + inst.dlen = sa_size; + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA; + inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA; + inst.cptr = sa_iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* Check if CPT-LF available */ + if (!cn10k_cpt_device_set_inuse(pf)) { + ret = -ENODEV; + goto free_mem; + } + + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + dma_wmb(); + ret = cn10k_wait_for_cpt_respose(pf, res); + if (ret) + goto set_available; + + /* Trigger CTX flush to write dirty data back to DRAM */ + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); + otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); + +set_available: + 
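/*
 * A standalone sketch (not driver code) of the LMTST target-address encoding
 * used by cn10k_cpt_inst_flush() above: bits [6:4] carry the size of the
 * first LMTST minus one, in 128-bit words, OR-ed into the CPT_LF_NQX(0) I/O
 * address.  The io_addr value below is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t io_addr = 0x840000000400ULL;	/* hypothetical NQX(0) address */
	uint64_t size = 64;			/* one 64-byte CPT_INST_S */
	uint64_t tar_addr;

	tar_addr = io_addr | ((((size / 16) - 1) & 0x7) << 4);
	printf("tar_addr=0x%llx size_field=%llu\n",
	       (unsigned long long)tar_addr,
	       (unsigned long long)((tar_addr >> 4) & 0x7));
	return 0;
}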
cn10k_cpt_device_set_available(pf); +free_mem: + dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova); + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova); + return ret; +} + +static int cn10k_ipsec_get_hw_ctx_offset(void) +{ + /* Offset on Hardware-context offset in word */ + return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F; +} + +static int cn10k_ipsec_get_ctx_push_size(void) +{ + /* Context push size is round up and in multiple of 8 Byte */ + return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F; +} + +static int cn10k_ipsec_get_aes_key_len(int key_len) +{ + /* key_len is aes key length in bytes */ + switch (key_len) { + case 16: + return CN10K_IPSEC_SA_AES_KEY_LEN_128; + case 24: + return CN10K_IPSEC_SA_AES_KEY_LEN_192; + default: + return CN10K_IPSEC_SA_AES_KEY_LEN_256; + } +} + +static void cn10k_outb_prepare_sa(struct xfrm_state *x, + struct cn10k_tx_sa_s *sa_entry) +{ + int key_len = (x->aead->alg_key_len + 7) / 8; + struct net_device *netdev = x->xso.dev; + u8 *key = x->aead->alg_key; + struct otx2_nic *pf; + u32 *tmp_salt; + u64 *tmp_key; + int idx; + + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + + /* context size, 128 Byte aligned up */ + pf = netdev_priv(netdev); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset(); + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + + /* Ucode to skip two words of CPT_CTX_HW_S */ + sa_entry->ctx_hdr_size = 1; + + /* Allow Atomic operation (AOP) */ + sa_entry->aop_valid = 1; + + /* Outbound, ESP TRANSPORT/TUNNEL Mode, AES-GCM with */ + sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB; + sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP; + sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM; + sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET; + if (x->props.mode == XFRM_MODE_TUNNEL) + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL; + else + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT; + + /* Last 4 bytes are salt */ + key_len -= 4; + sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len); + memcpy(sa_entry->cipher_key, key, key_len); + tmp_key = (u64 *)sa_entry->cipher_key; + + for (idx = 0; idx < key_len / 8; idx++) + tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]); + + memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4); + tmp_salt = (u32 *)&sa_entry->iv_gcm_salt; + *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt); + + /* Write SA context data to memory before enabling */ + wmb(); + + /* Enable SA */ + sa_entry->sa_valid = 1; +} + +static int cn10k_ipsec_validate_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->props.aalgo != SADB_AALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload authenticated xfrm states"); + return -EINVAL; + } + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + NL_SET_ERR_MSG_MOD(extack, + "Only AES-GCM-ICV16 xfrm state may be offloaded"); + return -EINVAL; + } + if (x->props.calgo != SADB_X_CALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload compressed xfrm states"); + return -EINVAL; + } + if (x->props.flags & XFRM_STATE_ESN) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states"); + return -EINVAL; + } + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + NL_SET_ERR_MSG_MOD(extack, + "Only IPv4/v6 xfrm states may be offloaded"); + return -EINVAL; + } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload other than crypto-mode"); + 
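/*
 * A standalone sketch (not driver code) of the key handling in
 * cn10k_outb_prepare_sa() above: the xfrm AEAD key length is given in bits
 * and includes a 32-bit salt appended to the AES key (RFC 4106 style), so a
 * 160-bit blob is a 128-bit cipher key plus 4 salt bytes.  The key bytes
 * below are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t blob[20] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		0xde, 0xad, 0xbe, 0xef,		/* 4-byte salt */
	};
	unsigned int alg_key_len = 160;			/* bits */
	unsigned int key_len = (alg_key_len + 7) / 8;	/* 20 bytes */
	uint8_t key[32] = { 0 };
	uint32_t salt;

	key_len -= 4;				/* last 4 bytes are the salt */
	memcpy(key, blob, key_len);
	memcpy(&salt, blob + key_len, sizeof(salt));
	printf("aes key bytes=%u (AES-128) salt=0x%08x\n", key_len, salt);
	return 0;
}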
return -EINVAL; + } + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) { + NL_SET_ERR_MSG_MOD(extack, + "Only tunnel/transport xfrm states may be offloaded"); + return -EINVAL; + } + if (x->id.proto != IPPROTO_ESP) { + NL_SET_ERR_MSG_MOD(extack, + "Only ESP xfrm state may be offloaded"); + return -EINVAL; + } + if (x->encap) { + NL_SET_ERR_MSG_MOD(extack, + "Encapsulated xfrm state may not be offloaded"); + return -EINVAL; + } + if (!x->aead) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without aead"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD ICV length other than 128bit"); + return -EINVAL; + } + if (x->aead->alg_key_len != 128 + 32 && + x->aead->alg_key_len != 192 + 32 && + x->aead->alg_key_len != 256 + 32) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD key length other than 128/192/256bit"); + return -EINVAL; + } + if (x->tfcpad) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with tfc padding"); + return -EINVAL; + } + if (!x->geniv) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without geniv"); + return -EINVAL; + } + if (strcmp(x->geniv, "seqiv")) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with geniv other than seqiv"); + return -EINVAL; + } + return 0; +} + +static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported"); + return -EOPNOTSUPP; +} + +static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + err = cn10k_ipsec_validate_state(x, extack); + if (err) + return err; + + pf = netdev_priv(netdev); + + err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); + if (err) + return err; + + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + cn10k_outb_prepare_sa(x, sa_entry); + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA"); + qmem_free(pf->dev, sa_info); + return err; + } + + x->xso.offload_handle = (unsigned long)sa_info; + /* Enable static branch when first SA setup */ + if (!pf->ipsec.outb_sa_count) + static_branch_enable(&cn10k_ipsec_sa_enabled); + pf->ipsec.outb_sa_count++; + return 0; +} + +static int cn10k_ipsec_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return cn10k_ipsec_inb_add_state(x, extack); + else + return cn10k_ipsec_outb_add_state(x, extack); +} + +static void cn10k_ipsec_del_state(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return; + + pf = netdev_priv(netdev); + + sa_info = (struct qmem *)x->xso.offload_handle; + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + /* Disable SA in CPT h/w */ + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->aop_valid = 1; + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) + netdev_err(netdev, "Error (%d) deleting SA\n", err); + + x->xso.offload_handle = 0; + qmem_free(pf->dev, sa_info); + + /* If no more SA's 
then update netdev feature for potential change + * in NETIF_F_HW_ESP. + */ + if (!--pf->ipsec.outb_sa_count) + queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); +} + +static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + if (x->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl > 5) + return false; + } else { + /* Offload with IPv6 extension headers is not support yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + return true; +} + +static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { + .xdo_dev_state_add = cn10k_ipsec_add_state, + .xdo_dev_state_delete = cn10k_ipsec_del_state, + .xdo_dev_offload_ok = cn10k_ipsec_offload_ok, +}; + +static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) +{ + struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec, + sa_work); + struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); + + /* Disable static branch when no more SA enabled */ + static_branch_disable(&cn10k_ipsec_sa_enabled); + rtnl_lock(); + netdev_update_features(pf->netdev); + rtnl_unlock(); +} + +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + /* IPsec offload supported on cn10k */ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return -EOPNOTSUPP; + + /* Initialize CPT for outbound ipsec offload */ + if (enable) + return cn10k_outb_cpt_init(netdev); + + /* Don't do CPT cleanup if SA installed */ + if (pf->ipsec.outb_sa_count) { + netdev_err(pf->netdev, "SA installed on this device\n"); + return -EBUSY; + } + + return cn10k_outb_cpt_clean(pf); +} + +int cn10k_ipsec_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + u32 sa_size; + + if (!is_dev_support_ipsec_offload(pf->pdev)) + return 0; + + /* Each SA entry size is 128 Byte round up in size */ + sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ? + (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) * + OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s); + pf->ipsec.sa_size = sa_size; + + INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler); + pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0); + if (!pf->ipsec.sa_workq) { + netdev_err(pf->netdev, "SA alloc workqueue failed\n"); + return -ENOMEM; + } + + /* Set xfrm device ops */ + netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops; + netdev->hw_features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; + + cn10k_cpt_device_set_unavailable(pf); + return 0; +} +EXPORT_SYMBOL(cn10k_ipsec_init); + +void cn10k_ipsec_clean(struct otx2_nic *pf) +{ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return; + + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + return; + + if (pf->ipsec.sa_workq) { + destroy_workqueue(pf->ipsec.sa_workq); + pf->ipsec.sa_workq = NULL; + } + + cn10k_outb_cpt_clean(pf); +} +EXPORT_SYMBOL(cn10k_ipsec_clean); + +static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x, + struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 *src; + + src = (u8 *)skb->data + ETH_HLEN; + + if (x->props.family == AF_INET) { + iph = (struct iphdr *)src; + return ntohs(iph->tot_len); + } + + ipv6h = (struct ipv6hdr *)src; + return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr); +} + +/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure. + * SG of NIX and CPT are same in size. 
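/*
 * A standalone walk-through (not driver code) of the SA sizing in
 * cn10k_ipsec_init() above: struct cn10k_tx_sa_s spans words W0-W36
 * (296 bytes, assuming the bitfields pack as written), which is rounded up
 * to the next 128-byte boundary.  The ctx_size and hw_ctx_off values written
 * by cn10k_outb_prepare_sa() follow from the same layout.
 */
#include <stdio.h>

int main(void)
{
	unsigned int align = 128;	/* OTX2_ALIGN, assumed */
	unsigned int sa = 296;		/* sizeof(struct cn10k_tx_sa_s) */
	unsigned int hw_ctx_off = 248;	/* offsetof(..., hw_ctx), bytes */
	unsigned int sa_size;

	sa_size = sa % align ? (sa / align + 1) * align : sa;
	printf("sa_size=%u ctx_size=%u hw_ctx_off=%u words push_size=%u words\n",
	       sa_size, sa_size / align, hw_ctx_off / 8, hw_ctx_off / 8);
	return 0;
}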
+ * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct cpt_sg_s *cpt_sg = NULL; + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u64 *cpt_iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size); + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + + cpt_sg += (seg / MAX_SEGS_PER_SG) * 4; + cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[seg % MAX_SEGS_PER_SG] = len; + sg->segs++; + *iova++ = dma_addr; + *cpt_iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + + *cpt_sg = *(struct cpt_sg_s *)sg; + cpt_sg->rsvd_63_50 = 0; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +static u16 cn10k_ipsec_get_param1(u8 iv_offset) +{ + u16 param1_val; + + /* Set Crypto mode, disable L3/L4 checksum */ + param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM | + CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM; + param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT; + return param1_val; +} + +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + struct cpt_inst_s inst; + struct cpt_res_s *res; + struct xfrm_state *x; + struct qmem *sa_info; + dma_addr_t dptr_iova; + struct sec_path *sp; + u8 encap_offset; + u8 auth_offset; + u8 gthr_size; + u8 iv_offset; + u16 dlen; + + /* Check for IPSEC offload enabled */ + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + goto drop; + + sp = skb_sec_path(skb); + if (unlikely(!sp->len)) + goto drop; + + x = xfrm_input_state(skb); + if (unlikely(!x)) + goto drop; + + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) + goto drop; + + dlen = cn10k_ipsec_get_ip_data_len(x, skb); + if (dlen == 0 && netif_msg_tx_err(pf)) { + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); + goto drop; + } + + /* Check for valid SA context */ + sa_info = (struct qmem *)x->xso.offload_handle; + if (!sa_info) + goto drop; + + memset(&inst, 0, sizeof(struct cpt_inst_s)); + + /* Get authentication offset */ + if (x->props.family == AF_INET) + auth_offset = sizeof(struct iphdr); + else + auth_offset = sizeof(struct ipv6hdr); + + /* IV offset is after ESP header */ + iv_offset = auth_offset + sizeof(struct ip_esp_hdr); + /* Encap will start after IV */ + encap_offset = iv_offset + GCM_RFC4106_IV_SIZE; + + /* CPT Instruction word-1 */ + res = (struct cpt_res_s 
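/*
 * A standalone worked example (not driver code) of the header offsets
 * computed just above for an IPv4 packet with no IP options: authentication
 * data follows the 20-byte IP header, the IV follows the 8-byte ESP header,
 * and encryption starts after the 8-byte RFC 4106 IV.  param1 packs the IV
 * offset together with the "disable L3/L4 checksum" bits from
 * cn10k_ipsec_get_param1().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int iphdr = 20, esphdr = 8, iv_len = 8;	/* bytes */
	unsigned int auth_offset = iphdr;			/* 20 */
	unsigned int iv_offset = auth_offset + esphdr;		/* 28 */
	unsigned int encap_offset = iv_offset + iv_len;		/* 36 */
	uint16_t param1 = 0x1 | 0x2 | (iv_offset << 8);

	printf("auth=%u iv=%u encap=%u param1=0x%04x\n",
	       auth_offset, iv_offset, encap_offset, (unsigned int)param1);
	return 0;
}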
*)(sq->cpt_resp->base + (64 * sq->head)); + res->compcode = 0; + inst.res_addr = sq->cpt_resp->iova + (64 * sq->head); + + /* CPT Instruction word-2 */ + inst.rvu_pf_func = pf->pcifunc; + + /* CPT Instruction word-3: + * Set QORD to force CPT_RES_S write completion + */ + inst.qord = 1; + + /* CPT Instruction word-4 */ + /* inst.dlen should not include ICV length */ + inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8); + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC; + inst.param1 = cn10k_ipsec_get_param1(iv_offset); + + inst.param2 = encap_offset << + CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT; + inst.param2 |= (u16)auth_offset << + CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT; + + /* CPT Instruction word-5 */ + gthr_size = num_segs / MAX_SEGS_PER_SG; + gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size; + + gthr_size &= 0xF; + dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2))); + inst.dptr = dptr_iova | ((u64)gthr_size << 60); + + /* CPT Instruction word-6 */ + inst.rptr = inst.dptr; + + /* CPT Instruction word-7 */ + inst.cptr = sa_info->iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* CPT Instruction word-0 */ + inst.nixtxl = (size / 16) - 1; + inst.dat_offset = ETH_HLEN; + inst.nixtx_offset = sq->sqe_size; + + netdev_tx_sent_queue(txq, skb->len); + + /* Finally Flush the CPT instruction */ + sq->head++; + sq->head &= (sq->sqe_cnt - 1); + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + return true; +drop: + dev_kfree_skb_any(skb); + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h new file mode 100644 index 000000000000..9965df0faa3e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#ifndef CN10K_IPSEC_H +#define CN10K_IPSEC_H + +#include <linux/types.h> + +DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + +/* CPT instruction size in bytes */ +#define CN10K_CPT_INST_SIZE 64 + +/* CPT instruction (CPT_INST_S) queue length */ +#define CN10K_CPT_INST_QLEN 8200 + +/* CPT instruction queue size passed to HW is in units of + * 40*CPT_INST_S messages. 
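/*
 * A standalone worked example (not driver code) of the CPT_INST_S packing
 * done above: dlen covers the Ethernet header plus the IP datagram but
 * excludes the 16-byte GCM ICV, the gather-list size (number of 3-segment SG
 * blocks) lands in the top nibble of dptr, and nixtxl is the NIX SQE size in
 * 16-byte words minus one.  All the input numbers are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ip_len = 120, eth_hlen = 14, icv_bytes = 16;
	unsigned int num_segs = 4, sqe_bytes = 48;
	uint64_t ring_iova = 0x1f0000000ULL;	/* hypothetical SQE ring iova */
	unsigned int dlen = ip_len + eth_hlen - icv_bytes;	/* 118 */
	unsigned int gthr = (num_segs + 2) / 3;			/* 2 SG blocks */
	uint64_t dptr = ring_iova | ((uint64_t)gthr << 60);
	unsigned int nixtxl = sqe_bytes / 16 - 1;

	printf("dlen=%u gthr=%u dptr=0x%llx nixtxl=%u\n",
	       dlen, gthr, (unsigned long long)dptr, nixtxl);
	return 0;
}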
+ */ +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40) + +/* CPT needs 320 free entries */ +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE) +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40) + +/* CPT instruction queue length in bytes */ +#define CN10K_CPT_INST_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \ + CN10K_CPT_INST_QLEN_EXTRA_BYTES) + +/* CPT instruction group queue length in bytes */ +#define CN10K_CPT_INST_GRP_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16) + +/* CPT FC length in bytes */ +#define CN10K_CPT_Q_FC_LEN 128 + +/* Default CPT engine group for ipsec offload */ +#define CN10K_DEF_CPT_IPSEC_EGRP 1 + +/* CN10K CPT LF registers */ +#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT) +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10) +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40) +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0) +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100) +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110) +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120) +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3) +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510) + +/* IPSEC Instruction opcodes */ +#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL +#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL +#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL + +enum cn10k_cpt_comp_e { + CN10K_CPT_COMP_E_NOTDONE = 0x00, + CN10K_CPT_COMP_E_GOOD = 0x01, + CN10K_CPT_COMP_E_FAULT = 0x02, + CN10K_CPT_COMP_E_HWERR = 0x04, + CN10K_CPT_COMP_E_INSTERR = 0x05, + CN10K_CPT_COMP_E_WARN = 0x06, + CN10K_CPT_COMP_E_MASK = 0x3F +}; + +struct cn10k_cpt_inst_queue { + u8 *vaddr; + u8 *real_vaddr; + dma_addr_t dma_addr; + dma_addr_t real_dma_addr; + u32 size; +}; + +enum cn10k_cpt_hw_state_e { + CN10K_CPT_HW_UNAVAILABLE, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE +}; + +struct cn10k_ipsec { + /* Outbound CPT */ + u64 io_addr; + atomic_t cpt_state; + struct cn10k_cpt_inst_queue iq; + + /* SA info */ + u32 sa_size; + u32 outb_sa_count; + struct work_struct sa_work; + struct workqueue_struct *sa_workq; +}; + +/* CN10K IPSEC Security Association (SA) */ +/* SA direction */ +#define CN10K_IPSEC_SA_DIR_INB 0 +#define CN10K_IPSEC_SA_DIR_OUTB 1 +/* SA protocol */ +#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0 +#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1 +/* SA Encryption Type */ +#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5 +/* SA IPSEC mode Transport/Tunnel */ +#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0 +#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1 +/* SA AES Key Length */ +#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1 +#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2 +#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3 +/* IV Source */ +#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0 +#define CN10K_IPSEC_SA_IV_SRC_PACKET 3 + +struct cn10k_tx_sa_s { + u64 esn_en : 1; /* W0 */ + u64 rsvd_w0_1_8 : 8; + u64 hw_ctx_off : 7; + u64 ctx_id : 16; + u64 rsvd_w0_32_47 : 16; + u64 ctx_push_size : 7; + u64 rsvd_w0_55 : 1; + u64 ctx_hdr_size : 2; + u64 aop_valid : 1; + u64 rsvd_w0_59 : 1; + u64 ctx_size : 4; + u64 w1; /* W1 */ + u64 sa_valid : 1; /* W2 */ + u64 sa_dir : 1; + u64 rsvd_w2_2_3 : 2; + u64 ipsec_mode : 1; + u64 ipsec_protocol : 1; + u64 aes_key_len : 2; + u64 enc_type : 3; + u64 rsvd_w2_11_19 : 9; + u64 iv_src : 2; + u64 rsvd_w2_22_31 : 10; + u64 rsvd_w2_32_63 : 32; + u64 w3; /* W3 */ + u8 cipher_key[32]; /* W4 - W7 */ + u32 rsvd_w8_0_31; /* W8 : IV */ + u32 iv_gcm_salt; + u64 rsvd_w9_w30[22]; /* W9 - W30 */ + u64 hw_ctx[6]; /* W31 - W36 */ +}; + +/* CPT 
instruction parameter-1 */ +#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1 +#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2 +#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20 +#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8 + +/* CPT instruction parameter-2 */ +#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0 +#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8 + +/* CPT Instruction Structure */ +struct cpt_inst_s { + u64 nixtxl : 3; /* W0 */ + u64 doneint : 1; + u64 rsvd_w0_4_15 : 12; + u64 dat_offset : 8; + u64 ext_param1 : 8; + u64 nixtx_offset : 20; + u64 rsvd_w0_52_63 : 12; + u64 res_addr; /* W1 */ + u64 tag : 32; /* W2 */ + u64 tt : 2; + u64 grp : 10; + u64 rsvd_w2_44_47 : 4; + u64 rvu_pf_func : 16; + u64 qord : 1; /* W3 */ + u64 rsvd_w3_1_2 : 2; + u64 wqe_ptr : 61; + u64 dlen : 16; /* W4 */ + u64 param2 : 16; + u64 param1 : 16; + u64 opcode_major : 8; + u64 opcode_minor : 8; + u64 dptr; /* W5 */ + u64 rptr; /* W6 */ + u64 cptr : 60; /* W7 */ + u64 ctx_val : 1; + u64 egrp : 3; +}; + +/* CPT Instruction Result Structure */ +struct cpt_res_s { + u64 compcode : 7; /* W0 */ + u64 doneint : 1; + u64 uc_compcode : 8; + u64 uc_info : 48; + u64 esn; /* W1 */ +}; + +/* CPT SG structure */ +struct cpt_sg_s { + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_63_50 : 14; +}; + +/* CPT LF_INPROG Register */ +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0) +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32) +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40) + +/* CPT LF_Q_GRP_PTR Register */ +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0) +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) + +/* CPT LF CTX Flush Register */ +#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) + +#ifdef CONFIG_XFRM_OFFLOAD +int cn10k_ipsec_init(struct net_device *netdev); +void cn10k_ipsec_clean(struct otx2_nic *pf); +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable); +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset); +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size); +#else +static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev) +{ + return 0; +} + +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf) +{ +} + +static inline __maybe_unused +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + return 0; +} + +static inline bool __maybe_unused +otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + return true; +} + +static inline bool __maybe_unused +cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + return true; +} +#endif +#endif // CN10K_IPSEC_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 6cc7a78968fc..f3b9daffaec3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) return "SA"; default: return "Unknown"; - }; - - return "Unknown"; 
+ } } static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 523ecb798a7a..2b49bfec7869 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -10,12 +10,19 @@ #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> +#include <linux/dcbnl.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" +static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) +{ + return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; +} + static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { @@ -964,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; + /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG. + * SG of NIX and CPT are same in size. Allocate memory for CPT SG + * same as NIX SQE for base address alignment. + * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ + err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, + sq->sqe_size * 2); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); + if (err) + return err; + if (qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, TSO_HEADER_SIZE); @@ -1722,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) return -ENOMEM; req->chan_base = 0; -#ifdef CONFIG_DCB - req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; - req->bpid_per_chan = pfvf->pfc_en ? 
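/*
 * A standalone sketch (not driver code) of how the double-width sqe_ring
 * allocated above is indexed: each entry holds the CPT scatter/gather copy in
 * its first half and the NIX SQE in its second half, and the matching 64-byte
 * CPT response slot sits at the same index in cpt_resp.  The 128-byte
 * sqe_size is an assumption for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sqe_size = 128, head = 5;		/* example values */
	unsigned int entry = head * (sqe_size * 2);
	unsigned int cpt_sg_off = entry;		/* first half */
	unsigned int nix_sqe_off = entry + sqe_size;	/* second half */
	unsigned int resp_off = head * 64;		/* cpt_resp slot */

	printf("cpt_sg@+%u nix_sqe@+%u resp@+%u\n",
	       cpt_sg_off, nix_sqe_off, resp_off);
	return 0;
}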
1 : 0; -#else - req->chan_cnt = 1; - req->bpid_per_chan = 0; -#endif + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} +EXPORT_SYMBOL(otx2_nix_cpt_config_bp); + /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, struct cgx_stats_rsp *rsp) @@ -1947,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + const skb_frag_t *frag; + struct page *page; + int offset; + + /* Crypto hardware need write permission for ipsec crypto offload */ + if (unlikely(xfrm_offload(skb))) { + dir = DMA_BIDIRECTIONAL; + skb = skb_unshare(skb, GFP_ATOMIC); + } + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = skb_frag_off(frag); + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, dir); +} + +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + struct sk_buff *skb = NULL; + int seg; + + skb = (struct sk_buff *)sg->skb; + if (unlikely(xfrm_offload(skb))) + dir = DMA_BIDIRECTIONAL; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], + sg->size[seg], dir); + } + sg->num_segs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 566848663fea..65814e3dc93f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -30,6 +30,7 @@ #include <rvu_trace.h> #include "qos.h" #include "rep.h" +#include "cn10k_ipsec.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -40,6 +41,7 @@ #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 #define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 @@ -55,6 +57,9 @@ #define NIX_PF_PFC_PRIO_MAX 8 #endif +/* Number of segments per SG structure */ +#define MAX_SEGS_PER_SG 3 + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -448,6 +453,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) #define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) #define OTX2_FLAG_PORT_UP BIT_ULL(19) +#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20) u64 flags; u64 *cq_op_addr; @@ -499,9 +505,9 @@ struct otx2_nic { /* Devlink */ struct otx2_devlink *dl; -#ifdef CONFIG_DCB /* PFC */ u8 pfc_en; +#ifdef CONFIG_DCB u8 *queue_to_pfc_map; 
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX]; @@ -522,6 +528,9 @@ struct otx2_nic { u16 rep_pf_map[RVU_MAX_REP]; u16 esw_mode; #endif + + /* Inline ipsec */ + struct cn10k_ipsec ipsec; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; } +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF && + (pdev->revision & 0xFF) == 0x54) + return true; + + return false; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) case BLKTYPE_NPA: blkaddr = BLKADDR_NPA; break; + case BLKTYPE_CPT: + blkaddr = BLKADDR_CPT0; + break; default: blkaddr = BLKADDR_RVUM; break; @@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); @@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b) { return *(u16 *)a - *(u16 *)b; } + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len); +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 294fba58b670..f110dfa42360 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -435,6 +435,9 @@ process_pfc: return err; } + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pfvf, false); + /* Request Per channel Bpids */ if (pfc->pfc_en) otx2_nix_config_bp(pfvf, true); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e310f99b1736..e1dde93e8af8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -26,6 +26,7 @@ #include "cn10k.h" #include "qos.h" #include <rvu_trace.h> +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" @@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf) if (!sq->sqe) continue; qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->sqe_ring); + qmem_free(pf->dev, sq->cpt_resp); qmem_free(pf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); @@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf) if (err) goto err_free_npa_lf; + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pf, false); + /* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, true); @@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev, return otx2_enable_rxvlan(pf, features & 
NETIF_F_HW_VLAN_CTAG_RX); + if (changed & NETIF_F_HW_ESP) + return cn10k_ipsec_ethtool_init(netdev, + features & NETIF_F_HW_ESP); + return otx2_handle_ntuple_tc_features(netdev, features); } @@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* reset CGX/RPM MAC stats */ otx2_reset_mac_stats(pf); + err = cn10k_ipsec_init(netdev); + if (err) + goto err_mcs_free; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_mcs_free; + goto err_ipsec_clean; } err = otx2_wq_init(pf); @@ -3206,6 +3220,8 @@ err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(pf); err_mcs_free: cn10k_mcs_free(pf); err_del_mcam_entries: @@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_unregister_dl(pf); unregister_netdev(netdev); + cn10k_ipsec_clean(pf); cn10k_mcs_free(pf); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 04bc06a80e23..224cef938927 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -11,6 +11,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/ip6_checksum.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -26,12 +27,25 @@ */ #define PTP_SYNC_SEC_OFFSET 34 +DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, bool *need_xdp_flush); +static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq, + struct sk_buff *skb) +{ + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + sq->sqe_base = sq->sqe_ring->base + sq->sqe_size + + (sq->head * (sq->sqe_size * 2)); + else + sq->sqe_base = sq->sqe->base; +} + static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { @@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i) #endif } -static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, - struct sk_buff *skb, int seg, int *len) -{ - const skb_frag_t *frag; - struct page *page; - int offset; - - /* First segment is always skb->data */ - if (!seg) { - page = virt_to_page(skb->data); - offset = offset_in_page(skb->data); - *len = skb_headlen(skb); - } else { - frag = &skb_shinfo(skb)->frags[seg - 1]; - page = skb_frag_page(frag); - offset = skb_frag_off(frag); - *len = skb_frag_size(frag); - } - return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); -} - -static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) -{ - int seg; - - for (seg = 0; seg < sg->num_segs; seg++) { - otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], - sg->size[seg], DMA_TO_DEVICE); - } - sg->num_segs = 0; -} - static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_cqe_tx_s *cqe) @@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, sq->head &= (sq->sqe_cnt - 1); } -#define MAX_SEGS_PER_SG 3 /* Add SQE scatter/gather subdescriptor structure */ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int num_segs, int *offset) @@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, int offset, num_segs, free_desc; struct nix_sqe_hdr_s 
*sqe_hdr; struct otx2_nic *pfvf = dev; + bool ret; /* Check if there is enough room between producer * and consumer index. @@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, /* If SKB doesn't fit in a single SQE, linearize it. * TODO: Consider adding JUMP descriptor instead. */ + if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); @@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, return true; } + /* Set sqe base address */ + otx2_sq_set_sqe_base(sq, skb); + /* Set SQE's SEND_HDR. * Do not clear the first 64bit as it contains constant info. */ @@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, otx2_sqe_add_ext(pfvf, sq, skb, &offset); /* Add SG subdesc with data frags */ - if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset); + else + ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset); + + if (!ret) { otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); return false; } @@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, sqe_hdr->sizem1 = (offset / 16) - 1; + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, + offset); + netdev_tx_sent_queue(txq, skb->len); /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); - return true; } EXPORT_SYMBOL(otx2_sq_append_skb); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index e1db5f961877..d23810963fdb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -101,6 +101,9 @@ struct otx2_snd_queue { struct queue_stats stats; u16 sqb_count; u64 *sqb_ptrs; + /* SQE ring and CPT response queue for Inline IPSEC */ + struct qmem *sqe_ring; + struct qmem *cpt_resp; } ____cacheline_aligned_in_smp; enum cq_type { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 839fc77c11b2..e926c6ce96cf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -14,6 +14,7 @@ #include "otx2_reg.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicvf" #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" @@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdev->bus->number, n); } + err = cn10k_ipsec_init(netdev); + if (err) + goto err_ptp_destroy; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_ptp_destroy; + goto err_ipsec_clean; } err = otx2_vf_wq_init(vf); @@ -730,6 +735,8 @@ err_shutdown_tc: otx2_shutdown_tc(vf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(vf); err_ptp_destroy: otx2_ptp_destroy(vf); err_detach_rsrc: @@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev) unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + cn10k_ipsec_clean(vf); otx2_ptp_destroy(vf); otx2_mcam_flow_del(vf); otx2_shutdown_tc(vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 
232b10740c13..04e08e06f30f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -680,14 +680,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) ndev->features |= ndev->hw_features; eth_hw_addr_random(ndev); err = rvu_rep_devlink_port_register(rep); - if (err) + if (err) { + free_netdev(ndev); goto exit; + } SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); err = register_netdev(ndev); if (err) { NL_SET_ERR_MSG_MOD(extack, "PFVF representor registration failed"); + rvu_rep_devlink_port_unregister(rep); free_netdev(ndev); goto exit; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 22ca6ee9665e..440a4c42b405 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config, } static void prestera_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct prestera_port *port = container_of(pcs, struct prestera_port, diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 25bf6ec44289..a1bada9eaaf6 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused, skge = netdev_priv(dev); switch (event) { case NETDEV_CHANGENAME: - if (skge->debugfs) - skge->debugfs = debugfs_rename(skge_debug, - skge->debugfs, - skge_debug, dev->name); + debugfs_change_name(skge->debugfs, "%s", dev->name); break; case NETDEV_GOING_DOWN: diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 3914cd9210d4..d7121c836508 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */ @@ -4493,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused, switch (event) { case NETDEV_CHANGENAME: - if (sky2->debugfs) { - sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, - sky2_debug, dev->name); - } + debugfs_change_name(sky2->debugfs, "%s", dev->name); break; case NETDEV_GOING_DOWN: diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c index 6c683a12d5aa..09f448f29124 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -15,6 +15,7 @@ #include <linux/u64_stats_sync.h> #include <net/dsa.h> #include <net/page_pool/helpers.h> +#include <net/pkt_cls.h> #include <uapi/linux/ppp_defs.h> #define AIROHA_MAX_NUM_GDM_PORTS 1 @@ -23,8 +24,12 @@ #define AIROHA_MAX_NUM_XSI_RSTS 5 #define AIROHA_MAX_MTU 2000 #define AIROHA_MAX_PACKET_SIZE 2048 +#define AIROHA_NUM_QOS_CHANNELS 4 +#define AIROHA_NUM_QOS_QUEUES 8 #define AIROHA_NUM_TX_RING 32 #define AIROHA_NUM_RX_RING 32 +#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \ + AIROHA_NUM_QOS_CHANNELS) #define 
AIROHA_FE_MC_MAX_VLAN_TABLE 64 #define AIROHA_FE_MC_MAX_VLAN_PORT 16 #define AIROHA_NUM_TX_IRQ 2 @@ -40,6 +45,9 @@ #define PSE_RSV_PAGES 128 #define PSE_QUEUE_RSV_PAGES 64 +#define QDMA_METER_IDX(_n) ((_n) & 0xff) +#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) + /* FE */ #define PSE_BASE 0x0100 #define CSR_IFC_BASE 0x0200 @@ -258,11 +266,11 @@ #define REG_GDM3_FWD_CFG GDM3_BASE #define GDM3_PAD_EN_MASK BIT(28) -#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100) +#define REG_GDM4_FWD_CFG GDM4_BASE #define GDM4_PAD_EN_MASK BIT(28) #define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) -#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c) +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c) #define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) #define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) #define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) @@ -541,9 +549,24 @@ #define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) #define INGRESS_FAST_TICK_MASK GENMASK(15, 0) +#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc)) +#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3)) + #define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0) #define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2) +#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3)) +#define CNTR_EN_MASK BIT(31) +#define CNTR_ALL_CHAN_EN_MASK BIT(30) +#define CNTR_ALL_QUEUE_EN_MASK BIT(29) +#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28) +#define CNTR_SRC_MASK GENMASK(27, 24) +#define CNTR_DSCP_RING_MASK GENMASK(20, 16) +#define CNTR_CHAN_MASK GENMASK(7, 3) +#define CNTR_QUEUE_MASK GENMASK(2, 0) + +#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3)) + #define REG_LMGR_INIT_CFG 0x1000 #define LMGR_INIT_START BIT(31) #define LMGR_SRAM_MODE_MASK BIT(30) @@ -565,13 +588,34 @@ #define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) #define EGRESS_FAST_TICK_MASK GENMASK(15, 0) +#define TRTCM_PARAM_RW_MASK BIT(31) +#define TRTCM_PARAM_RW_DONE_MASK BIT(30) +#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28) +#define TRTCM_METER_GROUP_MASK GENMASK(27, 26) +#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17) +#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16) + +#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4) +#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8) +#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc) + #define REG_TXWRR_MODE_CFG 0x1020 #define TWRR_WEIGHT_SCALE_MASK BIT(31) #define TWRR_WEIGHT_BASE_MASK BIT(3) +#define REG_TXWRR_WEIGHT_CFG 0x1024 +#define TWRR_RW_CMD_MASK BIT(31) +#define TWRR_RW_CMD_DONE BIT(30) +#define TWRR_CHAN_IDX_MASK GENMASK(23, 19) +#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16) +#define TWRR_VALUE_MASK GENMASK(15, 0) + #define REG_PSE_BUF_USAGE_CFG 0x1028 #define PSE_BUF_ESTIMATE_EN_MASK BIT(29) +#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2)) +#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2) + #define REG_GLB_TRTCM_CFG 0x1080 #define GLB_TRTCM_EN_MASK BIT(31) #define GLB_TRTCM_MODE_MASK BIT(30) @@ -720,6 +764,40 @@ enum { FE_PSE_PORT_DROP = 0xf, }; +enum tx_sched_mode { + TC_SCH_WRR8, + TC_SCH_SP, + TC_SCH_WRR7, + TC_SCH_WRR6, + TC_SCH_WRR5, + TC_SCH_WRR4, + TC_SCH_WRR3, + TC_SCH_WRR2, +}; + +enum trtcm_param_type { + TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */ + TRTCM_TOKEN_RATE_MODE, + TRTCM_BUCKETSIZE_SHIFT_MODE, + TRTCM_BUCKET_COUNTER_MODE, +}; + +enum trtcm_mode_type { + TRTCM_COMMIT_MODE, + TRTCM_PEAK_MODE, +}; + +enum trtcm_param { + TRTCM_TICK_SEL = BIT(0), + TRTCM_PKT_MODE = BIT(1), + TRTCM_METER_MODE = BIT(2), +}; + +#define MIN_TOKEN_SIZE 4096 +#define MAX_TOKEN_SIZE_OFFSET 17 +#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 
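/*
 * A standalone sketch (not driver code) of how a meter id is split with
 * QDMA_METER_IDX()/QDMA_METER_GROUP() above and packed into the TRTCM
 * CFG_PARAM word using the TRTCM_* field masks.  The meter id and the
 * read/write direction are example choices; PREP() is a local stand-in for
 * FIELD_PREP().
 */
#include <stdint.h>
#include <stdio.h>

#define PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define PARAM_RW	0x80000000u	/* BIT(31) */
#define PARAM_TYPE	0x30000000u	/* GENMASK(29, 28) */
#define METER_GROUP	0x0c000000u	/* GENMASK(27, 26) */
#define PARAM_INDEX	0x00fe0000u	/* GENMASK(23, 17) */
#define RATE_TYPE	0x00010000u	/* BIT(16) */

int main(void)
{
	unsigned int meter = 0x105;			/* example id */
	unsigned int idx = meter & 0xff;		/* 5 */
	unsigned int group = (meter >> 8) & 0x3;	/* 1 */
	uint32_t cfg;

	cfg = PARAM_RW |			/* write request */
	      PREP(PARAM_TYPE, 1) |		/* TRTCM_TOKEN_RATE_MODE */
	      PREP(METER_GROUP, group) |
	      PREP(PARAM_INDEX, idx) |
	      PREP(RATE_TYPE, 0);		/* TRTCM_COMMIT_MODE */
	printf("idx=%u group=%u cfg=0x%08x\n", idx, group, cfg);
	return 0;
}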
6) +#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0) + struct airoha_queue_entry { union { void *buf; @@ -810,6 +888,12 @@ struct airoha_gdm_port { int id; struct airoha_hw_stats stats; + + DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS); + + /* qos stats counters */ + u64 cpu_tx_packets; + u64 fwd_tx_packets; }; struct airoha_eth { @@ -1789,6 +1873,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); } + /* xmit ring drop default setting */ + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid), + TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK); + airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); @@ -1955,6 +2043,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma) FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40)); } +static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma) +{ + int i; + + for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) { + /* Tx-cpu transferred count */ + airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0); + airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1), + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK | + CNTR_ALL_DSCP_RING_EN_MASK | + FIELD_PREP(CNTR_CHAN_MASK, i)); + /* Tx-fwd transferred count */ + airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0); + airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1), + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK | + CNTR_ALL_DSCP_RING_EN_MASK | + FIELD_PREP(CNTR_SRC_MASK, 1) | + FIELD_PREP(CNTR_CHAN_MASK, i)); + } +} + static int airoha_qdma_hw_init(struct airoha_qdma *qdma) { int i; @@ -2005,6 +2114,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma) airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG, TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN); + airoha_qdma_init_qos_stats(qdma); return 0; } @@ -2138,17 +2248,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma) if (!qdma->q_rx[i].ndesc) continue; - napi_disable(&qdma->q_rx[i].napi); netif_napi_del(&qdma->q_rx[i].napi); airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); if (qdma->q_rx[i].page_pool) page_pool_destroy(qdma->q_rx[i].page_pool); } - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { - napi_disable(&qdma->q_tx_irq[i].napi); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) netif_napi_del(&qdma->q_tx_irq[i].napi); - } for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { if (!qdma->q_tx[i].ndesc) @@ -2173,6 +2280,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma) } } +static void airoha_qdma_stop_napi(struct airoha_qdma *qdma) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) + napi_disable(&qdma->q_tx_irq[i].napi); + + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { + if (!qdma->q_rx[i].ndesc) + continue; + + napi_disable(&qdma->q_rx[i].napi); + } +} + static void airoha_update_hw_stats(struct airoha_gdm_port *port) { struct airoha_eth *eth = port->qdma->eth; @@ -2413,21 +2535,44 @@ static void airoha_dev_get_stats64(struct net_device *dev, } while (u64_stats_fetch_retry(&port->stats.syncp, start)); } +static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + int queue, channel; + + /* For dsa device select QoS channel according to the dsa user port + * index, rely on port id otherwise. Select QoS queue based on the + * skb priority. + */ + channel = netdev_uses_dsa(dev) ? 
skb_get_queue_mapping(skb) : port->id; + channel = channel % AIROHA_NUM_QOS_CHANNELS; + queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */ + queue = channel * AIROHA_NUM_QOS_QUEUES + queue; + + return queue < dev->num_tx_queues ? queue : 0; +} + static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct skb_shared_info *sinfo = skb_shinfo(skb); struct airoha_gdm_port *port = netdev_priv(dev); - u32 msg0 = 0, msg1, len = skb_headlen(skb); - int i, qid = skb_get_queue_mapping(skb); + u32 msg0, msg1, len = skb_headlen(skb); struct airoha_qdma *qdma = port->qdma; u32 nr_frags = 1 + sinfo->nr_frags; struct netdev_queue *txq; struct airoha_queue *q; void *data = skb->data; + int i, qid; u16 index; u8 fport; + qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); + msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, + qid / AIROHA_NUM_QOS_QUEUES) | + FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, + qid % AIROHA_NUM_QOS_QUEUES); if (skb->ip_summed == CHECKSUM_PARTIAL) msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | @@ -2597,13 +2742,399 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev, } while (u64_stats_fetch_retry(&port->stats.syncp, start)); } +static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port, + int channel, enum tx_sched_mode mode, + const u16 *weights, u8 n_weights) +{ + int i; + + for (i = 0; i < AIROHA_NUM_TX_RING; i++) + airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel), + TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i)); + + for (i = 0; i < n_weights; i++) { + u32 status; + int err; + + airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG, + TWRR_RW_CMD_MASK | + FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) | + FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) | + FIELD_PREP(TWRR_VALUE_MASK, weights[i])); + err = read_poll_timeout(airoha_qdma_rr, status, + status & TWRR_RW_CMD_DONE, + USEC_PER_MSEC, 10 * USEC_PER_MSEC, + true, port->qdma, + REG_TXWRR_WEIGHT_CFG); + if (err) + return err; + } + + airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3), + CHAN_QOS_MODE_MASK(channel), + mode << __ffs(CHAN_QOS_MODE_MASK(channel))); + + return 0; +} + +static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port, + int channel) +{ + static const u16 w[AIROHA_NUM_QOS_QUEUES] = {}; + + return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w, + ARRAY_SIZE(w)); +} + +static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port, + int channel, + struct tc_ets_qopt_offload *opt) +{ + struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; + enum tx_sched_mode mode = TC_SCH_SP; + u16 w[AIROHA_NUM_QOS_QUEUES] = {}; + int i, nstrict = 0, nwrr, qidx; + + if (p->bands > AIROHA_NUM_QOS_QUEUES) + return -EINVAL; + + for (i = 0; i < p->bands; i++) { + if (!p->quanta[i]) + nstrict++; + } + + /* this configuration is not supported by the hw */ + if (nstrict == AIROHA_NUM_QOS_QUEUES - 1) + return -EINVAL; + + /* EN7581 SoC supports fixed QoS band priority where WRR queues have + * lowest priorities with respect to SP ones. + * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn + */ + nwrr = p->bands - nstrict; + qidx = nstrict && nwrr ? nstrict : 0; + for (i = 1; i <= p->bands; i++) { + if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx) + return -EINVAL; + + qidx = i == nwrr ? 
0 : qidx + 1; + } + + for (i = 0; i < nwrr; i++) + w[i] = p->weights[nstrict + i]; + + if (!nstrict) + mode = TC_SCH_WRR8; + else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1) + mode = nstrict + 1; + + return airoha_qdma_set_chan_tx_sched(port, channel, mode, w, + ARRAY_SIZE(w)); +} + +static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port, + int channel, + struct tc_ets_qopt_offload *opt) +{ + u64 cpu_tx_packets = airoha_qdma_rr(port->qdma, + REG_CNTR_VAL(channel << 1)); + u64 fwd_tx_packets = airoha_qdma_rr(port->qdma, + REG_CNTR_VAL((channel << 1) + 1)); + u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) + + (fwd_tx_packets - port->fwd_tx_packets); + _bstats_update(opt->stats.bstats, 0, tx_packets); + + port->cpu_tx_packets = cpu_tx_packets; + port->fwd_tx_packets = fwd_tx_packets; + + return 0; +} + +static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port, + struct tc_ets_qopt_offload *opt) +{ + int channel; + + if (opt->parent == TC_H_ROOT) + return -EINVAL; + + channel = TC_H_MAJ(opt->handle) >> 16; + channel = channel % AIROHA_NUM_QOS_CHANNELS; + + switch (opt->command) { + case TC_ETS_REPLACE: + return airoha_qdma_set_tx_ets_sched(port, channel, opt); + case TC_ETS_DESTROY: + /* PRIO is default qdisc scheduler */ + return airoha_qdma_set_tx_prio_sched(port, channel); + case TC_ETS_STATS: + return airoha_qdma_get_tx_ets_stats(port, channel, opt); + default: + return -EOPNOTSUPP; + } +} + +static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel, + u32 addr, enum trtcm_param_type param, + enum trtcm_mode_type mode, + u32 *val_low, u32 *val_high) +{ + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); + u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); + + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); + if (read_poll_timeout(airoha_qdma_rr, val, + val & TRTCM_PARAM_RW_DONE_MASK, + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, + qdma, REG_TRTCM_CFG_PARAM(addr))) + return -ETIMEDOUT; + + *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); + if (val_high) + *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); + + return 0; +} + +static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel, + u32 addr, enum trtcm_param_type param, + enum trtcm_mode_type mode, u32 val) +{ + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); + u32 config = TRTCM_PARAM_RW_MASK | + FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); + + airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); + + return read_poll_timeout(airoha_qdma_rr, val, + val & TRTCM_PARAM_RW_DONE_MASK, + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, + qdma, REG_TRTCM_CFG_PARAM(addr)); +} + +static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel, + u32 addr, enum trtcm_mode_type mode, + bool enable, u32 enable_mask) +{ + u32 val; + + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, + mode, &val, NULL)) + return -EINVAL; + + val = enable ? 
val | enable_mask : val & ~enable_mask; + + return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, + mode, val); +} + +static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma, + int channel, u32 addr, + enum trtcm_mode_type mode, + u32 rate_val, u32 bucket_size) +{ + u32 val, config, tick, unit, rate, rate_frac; + int err; + + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, + mode, &config, NULL)) + return -EINVAL; + + val = airoha_qdma_rr(qdma, addr); + tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val); + if (config & TRTCM_TICK_SEL) + tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val); + if (!tick) + return -EINVAL; + + unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick; + if (!unit) + return -EINVAL; + + rate = rate_val / unit; + rate_frac = rate_val % unit; + rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit; + rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) | + FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac); + + err = airoha_qdma_set_trtcm_param(qdma, channel, addr, + TRTCM_TOKEN_RATE_MODE, mode, rate); + if (err) + return err; + + val = max_t(u32, bucket_size, MIN_TOKEN_SIZE); + val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET); + + return airoha_qdma_set_trtcm_param(qdma, channel, addr, + TRTCM_BUCKETSIZE_SHIFT_MODE, + mode, val); +} + +static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port, + int channel, u32 rate, + u32 bucket_size) +{ + int i, err; + + for (i = 0; i <= TRTCM_PEAK_MODE; i++) { + err = airoha_qdma_set_trtcm_config(port->qdma, channel, + REG_EGRESS_TRTCM_CFG, i, + !!rate, TRTCM_METER_MODE); + if (err) + return err; + + err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel, + REG_EGRESS_TRTCM_CFG, + i, rate, bucket_size); + if (err) + return err; + } + + return 0; +} + +static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port, + struct tc_htb_qopt_offload *opt) +{ + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; + u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */ + struct net_device *dev = port->dev; + int num_tx_queues = dev->real_num_tx_queues; + int err; + + if (opt->parent_classid != TC_HTB_CLASSID_ROOT) { + NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid"); + return -EINVAL; + } + + err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum); + if (err) { + NL_SET_ERR_MSG_MOD(opt->extack, + "failed configuring htb offload"); + return err; + } + + if (opt->command == TC_HTB_NODE_MODIFY) + return 0; + + err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1); + if (err) { + airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum); + NL_SET_ERR_MSG_MOD(opt->extack, + "failed setting real_num_tx_queues"); + return err; + } + + set_bit(channel, port->qos_sq_bmap); + opt->qid = AIROHA_NUM_TX_RING + channel; + + return 0; +} + +static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue) +{ + struct net_device *dev = port->dev; + + netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1); + airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0); + clear_bit(queue, port->qos_sq_bmap); +} + +static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port, + struct tc_htb_qopt_offload *opt) +{ + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; + + if (!test_bit(channel, port->qos_sq_bmap)) { + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); + return -EINVAL; + } + + airoha_tc_remove_htb_queue(port, channel); + + return 0; +} + +static int 
airoha_tc_htb_destroy(struct airoha_gdm_port *port) +{ + int q; + + for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS) + airoha_tc_remove_htb_queue(port, q); + + return 0; +} + +static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port, + struct tc_htb_qopt_offload *opt) +{ + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; + + if (!test_bit(channel, port->qos_sq_bmap)) { + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); + return -EINVAL; + } + + opt->qid = channel; + + return 0; +} + +static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port, + struct tc_htb_qopt_offload *opt) +{ + switch (opt->command) { + case TC_HTB_CREATE: + break; + case TC_HTB_DESTROY: + return airoha_tc_htb_destroy(port); + case TC_HTB_NODE_MODIFY: + case TC_HTB_LEAF_ALLOC_QUEUE: + return airoha_tc_htb_alloc_leaf_queue(port, opt); + case TC_HTB_LEAF_DEL: + case TC_HTB_LEAF_DEL_LAST: + case TC_HTB_LEAF_DEL_LAST_FORCE: + return airoha_tc_htb_delete_leaf_queue(port, opt); + case TC_HTB_LEAF_QUERY_QUEUE: + return airoha_tc_get_htb_get_leaf_queue(port, opt); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_ETS: + return airoha_tc_setup_qdisc_ets(port, type_data); + case TC_SETUP_QDISC_HTB: + return airoha_tc_setup_qdisc_htb(port, type_data); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops airoha_netdev_ops = { .ndo_init = airoha_dev_init, .ndo_open = airoha_dev_open, .ndo_stop = airoha_dev_stop, + .ndo_select_queue = airoha_dev_select_queue, .ndo_start_xmit = airoha_dev_xmit, .ndo_get_stats64 = airoha_dev_get_stats64, .ndo_set_mac_address = airoha_dev_set_macaddr, + .ndo_setup_tc = airoha_dev_tc_setup, }; static const struct ethtool_ops airoha_ethtool_ops = { @@ -2640,7 +3171,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) } dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), - AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING); + AIROHA_NUM_NETDEV_TX_RINGS, + AIROHA_NUM_RX_RING); if (!dev) { dev_err(eth->dev, "alloc_etherdev failed\n"); return -ENOMEM; } @@ -2653,12 +3185,18 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) dev->watchdog_timeo = 5 * HZ; dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO6 | NETIF_F_IPV6_CSUM | - NETIF_F_SG | NETIF_F_TSO; + NETIF_F_SG | NETIF_F_TSO | + NETIF_F_HW_TC; dev->features |= dev->hw_features; dev->dev.of_node = np; dev->irq = qdma->irq; SET_NETDEV_DEV(dev, eth->dev); + /* reserve hw queues for HTB offloading */ + err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING); + if (err) + return err; + err = of_get_ethdev_address(np, dev); if (err) { if (err == -EPROBE_DEFER) @@ -2738,7 +3276,7 @@ static int airoha_probe(struct platform_device *pdev) err = airoha_hw_init(pdev, eth); if (err) - goto error; + goto error_hw_cleanup; for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) airoha_qdma_start_napi(&eth->qdma[i]); @@ -2753,13 +3291,16 @@ static int airoha_probe(struct platform_device *pdev) err = airoha_alloc_gdm_port(eth, np); if (err) { of_node_put(np); - goto error; + goto error_napi_stop; } } return 0; -error: +error_napi_stop: + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_qdma_stop_napi(&eth->qdma[i]); +error_hw_cleanup: for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) airoha_hw_cleanup(&eth->qdma[i]); @@ -2780,8 +3321,10 @@ 
static void airoha_remove(struct platform_device *pdev) struct airoha_eth *eth = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { + airoha_qdma_stop_napi(&eth->qdma[i]); airoha_hw_cleanup(&eth->qdma[i]); + } for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index be3d0876c521..568bbe5f83f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \ - fw_reset.o qos.o lib/tout.o lib/aso.o wc.o + fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o # # Netdev basic # @@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a ifneq ($(CONFIG_MLX5_TC_CT),) mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o + mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o endif mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o @@ -123,6 +124,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \ steering/sws/dr_ste_v0.o \ steering/sws/dr_ste_v1.o \ steering/sws/dr_ste_v2.o \ + steering/sws/dr_ste_v3.o \ steering/sws/dr_cmd.o \ steering/sws/dr_fw.o \ steering/sws/dr_action.o \ @@ -150,8 +152,9 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \ steering/hws/bwc.o \ steering/hws/debug.o \ steering/hws/vport.o \ - steering/hws/bwc_complex.o - + steering/hws/bwc_complex.o \ + steering/hws/fs_hws_pools.o \ + steering/hws/fs_hws.o # # SF device diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 6bd8a18e3af3..e733b81e18a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work) complete(&ent->done); } up(&cmd->vars.sem); + complete(&ent->slotted); return; } } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 9aed29fa4900..d6e736c1fb24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule, if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER) __entry->counter_id = - rule->dest_attr.counter_id; + mlx5_fc_id(rule->dest_attr.counter); ), TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n", __entry->rule, __entry->fte, __entry->index, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h index 62b3f7ff5562..e5b30801314b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h @@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void) } #endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */ +#if IS_ENABLED(CONFIG_MLX5_HW_STEERING) +struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void); +#else +static inline struct mlx5_ct_fs_ops * 
+mlx5_ct_fs_hmfs_ops_get(void) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */ + #endif /* __MLX5_EN_TC_CT_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c new file mode 100644 index 000000000000..a4263137fef5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */ + +#include "en_tc.h" +#include "en/tc_ct.h" +#include "en/tc_priv.h" +#include "en/tc/ct_fs.h" +#include "fs_core.h" +#include "steering/hws/fs_hws_pools.h" +#include "steering/hws/mlx5hws.h" +#include "steering/hws/table.h" + +struct mlx5_ct_fs_hmfs_matcher { + struct mlx5hws_bwc_matcher *hws_bwc_matcher; + refcount_t ref; +}; + +/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */ +#define NUM_MATCHERS (2 * 3) + +struct mlx5_ct_fs_hmfs { + struct mlx5hws_table *ct_tbl; + struct mlx5hws_table *ct_nat_tbl; + struct mlx5_flow_table *ct_nat; + struct mlx5hws_action *fwd_action; + struct mlx5hws_action *last_action; + struct mlx5hws_context *ctx; + struct mutex lock; /* Guards matchers */ + struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS]; + struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS]; +}; + +struct mlx5_ct_fs_hmfs_rule { + struct mlx5_ct_fs_rule fs_rule; + struct mlx5hws_bwc_rule *hws_bwc_rule; + struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher; + struct mlx5_fc *counter; +}; + +static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre) +{ + return ipv4 * 3 + tcp * 2 + gre; +} + +static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct, + struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl; + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + + ct_tbl = ct->fs_hws_table.hws_table; + ct_nat_tbl = ct_nat->fs_hws_table.hws_table; + post_ct_tbl = post_ct->fs_hws_table.hws_table; + fs_hmfs->ct_nat = ct_nat; + + if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) { + netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables"); + return -EOPNOTSUPP; + } + + netdev_dbg(fs->netdev, "using hmfs steering"); + + fs_hmfs->ct_tbl = ct_tbl; + fs_hmfs->ct_nat_tbl = ct_nat_tbl; + fs_hmfs->ctx = ct_tbl->ctx; + mutex_init(&fs_hmfs->lock); + + fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags); + if (!fs_hmfs->fwd_action) { + netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n"); + return -EINVAL; + } + fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags); + if (!fs_hmfs->last_action) { + netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n"); + mlx5hws_action_destroy(fs_hmfs->fwd_action); + return -EINVAL; + } + + return 0; +} + +static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs) +{ + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + + mlx5hws_action_destroy(fs_hmfs->last_action); + mlx5hws_action_destroy(fs_hmfs->fwd_action); +} + +static struct mlx5hws_bwc_matcher * +mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl, + struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre) +{ + u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS; + struct mlx5hws_match_parameters mask = { + .match_buf = spec->match_criteria, + 
.match_sz = sizeof(spec->match_criteria), + }; + u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */ + struct mlx5hws_bwc_matcher *hws_bwc_matcher; + + hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask); + if (!hws_bwc_matcher) + return ERR_PTR(-EINVAL); + + return hws_bwc_matcher; +} + +static struct mlx5_ct_fs_hmfs_matcher * +mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, + bool nat, bool ipv4, bool tcp, bool gre) +{ + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre); + struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher; + struct mlx5hws_bwc_matcher *hws_bwc_matcher; + struct mlx5hws_table *tbl; + + hmfs_matcher = nat ? + (fs_hmfs->matchers_nat + matcher_idx) : + (fs_hmfs->matchers + matcher_idx); + + if (refcount_inc_not_zero(&hmfs_matcher->ref)) + return hmfs_matcher; + + mutex_lock(&fs_hmfs->lock); + + /* Retry with lock, as the matcher might be already created by another cpu. */ + if (refcount_inc_not_zero(&hmfs_matcher->ref)) + goto out_unlock; + + tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl; + + hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre); + if (IS_ERR(hws_bwc_matcher)) { + netdev_warn(fs->netdev, + "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n", + nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher)); + + hmfs_matcher = ERR_CAST(hws_bwc_matcher); + goto out_unlock; + } + + hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher; + refcount_set(&hmfs_matcher->ref, 1); + +out_unlock: + mutex_unlock(&fs_hmfs->lock); + return hmfs_matcher; +} + +static void +mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher) +{ + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + + if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock)) + return; + + mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher); + mutex_unlock(&fs_hmfs->lock); +} + +#define NUM_CT_HMFS_RULES 4 + +static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs, + struct mlx5_flow_attr *attr, + struct mlx5hws_rule_action *rule_actions) +{ + struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action; + + memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions)); + rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter); + /* Modify header is special, it may require extra arguments outside the action itself. 
*/ + if (mh_action->mh_data) { + rule_actions[1].modify_header.offset = mh_action->mh_data->offset; + rule_actions[1].modify_header.data = mh_action->mh_data->data; + } + rule_actions[1].action = mh_action->hws_action; + rule_actions[2].action = fs_hmfs->fwd_action; + rule_actions[3].action = fs_hmfs->last_action; +} + +static struct mlx5_ct_fs_rule * +mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, struct flow_rule *flow_rule) +{ + struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES]; + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + struct mlx5hws_match_parameters match_params = { + .match_buf = spec->match_value, + .match_sz = ARRAY_SIZE(spec->match_value), + }; + struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher; + struct mlx5_ct_fs_hmfs_rule *hmfs_rule; + bool nat, tcp, ipv4, gre; + int err; + + if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule)) + return ERR_PTR(-EOPNOTSUPP); + + hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL); + if (!hmfs_rule) + return ERR_PTR(-ENOMEM); + + nat = (attr->ft == fs_hmfs->ct_nat); + ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4; + tcp = MLX5_GET(fte_match_param, spec->match_value, + outer_headers.ip_protocol) == IPPROTO_TCP; + gre = MLX5_GET(fte_match_param, spec->match_value, + outer_headers.ip_protocol) == IPPROTO_GRE; + + hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre); + if (IS_ERR(hmfs_matcher)) { + err = PTR_ERR(hmfs_matcher); + goto err_free_rule; + } + hmfs_rule->hmfs_matcher = hmfs_matcher; + + mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions); + hmfs_rule->counter = attr->counter; + + hmfs_rule->hws_bwc_rule = + mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params, + spec->flow_context.flow_source, rule_actions); + if (!hmfs_rule->hws_bwc_rule) { + err = -EINVAL; + goto err_put_matcher; + } + + return &hmfs_rule->fs_rule; + +err_put_matcher: + mlx5_fc_put_hws_action(hmfs_rule->counter); + mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher); +err_free_rule: + kfree(hmfs_rule); + return ERR_PTR(err); +} + +static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule) +{ + struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_hmfs_rule, + fs_rule); + mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule); + mlx5_fc_put_hws_action(hmfs_rule->counter); + mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher); + kfree(hmfs_rule); +} + +static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) +{ + struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_hmfs_rule, + fs_rule); + struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES]; + struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs); + int err; + + mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions); + + err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions); + if (err) { + mlx5_fc_put_hws_action(attr->counter); + return err; + } + + mlx5_fc_put_hws_action(hmfs_rule->counter); + hmfs_rule->counter = attr->counter; + + return 0; +} + +static struct mlx5_ct_fs_ops hmfs_ops = { + .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add, + .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del, + .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update, + + .init = mlx5_ct_fs_hmfs_init, + .destroy = mlx5_ct_fs_hmfs_destroy, + + .priv_size = sizeof(struct mlx5_ct_fs_hmfs), +}; + 
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void) +{ + return &hmfs_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 45737d039252..0c97c5899904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -13,7 +13,6 @@ #define INIT_ERR_PREFIX "ct_fs_smfs init failed" #define ct_dbg(fmt, args...)\ netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args) -#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16) struct mlx5_ct_fs_smfs_matcher { struct mlx5dr_matcher *dr_matcher; @@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs) mlx5_smfs_action_destroy(fs_smfs->fwd_action); } -static inline bool -mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys) -{ -#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name) - const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | - DISS_BIT(META); - const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | - DISS_BIT(PORTS) | DISS_BIT(TCP); - const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | - DISS_BIT(PORTS) | DISS_BIT(TCP); - const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | - DISS_BIT(PORTS); - const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | - DISS_BIT(PORTS); - const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS); - const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS); - - return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp || - used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre); -} - -static bool -mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule) -{ - struct flow_match_ipv4_addrs ipv4_addrs; - struct flow_match_ipv6_addrs ipv6_addrs; - struct flow_match_control control; - struct flow_match_basic basic; - struct flow_match_ports ports; - struct flow_match_tcp tcp; - - if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) { - ct_dbg("rule uses unexpected dissectors (0x%016llx)", - flow_rule->match.dissector->used_keys); - return false; - } - - flow_rule_match_basic(flow_rule, &basic); - flow_rule_match_control(flow_rule, &control); - flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs); - flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs); - if (basic.key->ip_proto != IPPROTO_GRE) - flow_rule_match_ports(flow_rule, &ports); - if (basic.key->ip_proto == IPPROTO_TCP) - flow_rule_match_tcp(flow_rule, &tcp); - - if (basic.mask->n_proto != htons(0xFFFF) || - (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) || - basic.mask->ip_proto != 0xFF || - (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP && - basic.key->ip_proto != IPPROTO_GRE)) { - ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)", - ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto), - basic.key->ip_proto, basic.mask->ip_proto); - return false; - } - - if (basic.key->ip_proto != IPPROTO_GRE && - (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) { - ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)", - ports.mask->src, ports.mask->dst); - return false; - } - - if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) { - ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags); - return false; - } - - return true; -} - static struct mlx5_ct_fs_rule 
* mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr, struct flow_rule *flow_rule) @@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, int num_actions = 0, err; bool nat, tcp, ipv4, gre; - if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule)) + if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule)) return ERR_PTR(-EOPNOTSUPP); smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index a84ebac2f011..a065e8fafb1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2065,10 +2065,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv) struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get(); int err; - if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB && - ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) { - ct_dbg("Using SMFS ct flow steering provider"); - fs_ops = mlx5_ct_fs_smfs_ops_get(); + if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) { + if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) { + ct_dbg("Using HMFS ct flow steering provider"); + fs_ops = mlx5_ct_fs_hmfs_ops_get(); + } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) { + ct_dbg("Using SMFS ct flow steering provider"); + fs_ops = mlx5_ct_fs_smfs_ops_get(); + } + + if (!fs_ops) { + ct_dbg("Requested flow steering mode is not enabled."); + return -EOPNOTSUPP; + } } ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL); @@ -2421,3 +2430,74 @@ out_inc_drop: atomic_inc(&ct_priv->debugfs.stats.rx_dropped); return false; } + +static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys) +{ +#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name) + const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | + DISS_BIT(META); + const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | + DISS_BIT(PORTS) | DISS_BIT(TCP); + const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | + DISS_BIT(PORTS) | DISS_BIT(TCP); + const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | + DISS_BIT(PORTS); + const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | + DISS_BIT(PORTS); + const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS); + const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS); + + return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp || + used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre); +} + +bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule) +{ + struct flow_match_ipv4_addrs ipv4_addrs; + struct flow_match_ipv6_addrs ipv6_addrs; + struct flow_match_control control; + struct flow_match_basic basic; + struct flow_match_ports ports; + struct flow_match_tcp tcp; + + if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) { + netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)", + flow_rule->match.dissector->used_keys); + return false; + } + + flow_rule_match_basic(flow_rule, &basic); + flow_rule_match_control(flow_rule, &control); + flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs); + flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs); + if (basic.key->ip_proto != IPPROTO_GRE) + flow_rule_match_ports(flow_rule, &ports); + if (basic.key->ip_proto == IPPROTO_TCP) + flow_rule_match_tcp(flow_rule, 
&tcp); + + if (basic.mask->n_proto != htons(0xFFFF) || + (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) || + basic.mask->ip_proto != 0xFF || + (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP && + basic.key->ip_proto != IPPROTO_GRE)) { + netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)", + ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto), + basic.key->ip_proto, basic.mask->ip_proto); + return false; + } + + if (basic.key->ip_proto != IPPROTO_GRE && + (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) { + netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)", + ports.mask->src, ports.mask->dst); + return false; + } + + if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) { + netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)", + tcp.mask->flags); + return false; + } + + return true; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index b66c5f98067f..5e9dbdd4a5e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -128,6 +128,9 @@ bool mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, struct sk_buff *skb, u8 zone_restore_id); +#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16) +bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule); + #else /* CONFIG_MLX5_TC_CT */ static inline struct mlx5_tc_ct_priv * @@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, return false; } +static inline bool +mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, + struct flow_rule *flow_rule) +{ + return false; +} + #endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */ #endif /* __MLX5_EN_TC_CT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index ca92e518be76..501709ac310f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -94,25 +94,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) u32 esn, esn_msb; u8 overlap; - switch (x->xso.type) { - case XFRM_DEV_OFFLOAD_PACKET: - switch (x->xso.dir) { - case XFRM_DEV_OFFLOAD_IN: - esn = x->replay_esn->seq; - esn_msb = x->replay_esn->seq_hi; - break; - case XFRM_DEV_OFFLOAD_OUT: - esn = x->replay_esn->oseq; - esn_msb = x->replay_esn->oseq_hi; - break; - default: - WARN_ON(true); - return false; - } - break; - case XFRM_DEV_OFFLOAD_CRYPTO: - /* Already parsed by XFRM core */ + switch (x->xso.dir) { + case XFRM_DEV_OFFLOAD_IN: esn = x->replay_esn->seq; + esn_msb = x->replay_esn->seq_hi; + break; + case XFRM_DEV_OFFLOAD_OUT: + esn = x->replay_esn->oseq; + esn_msb = x->replay_esn->oseq_hi; break; default: WARN_ON(true); @@ -121,11 +110,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) overlap = sa_entry->esn_state.overlap; - if (esn >= x->replay_esn->replay_window) - seq_bottom = esn - x->replay_esn->replay_window + 1; + if (!x->replay_esn->replay_window) { + seq_bottom = esn; + } else { + if (esn >= x->replay_esn->replay_window) + seq_bottom = esn - x->replay_esn->replay_window + 1; - if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO) - esn_msb = xfrm_replay_seqhi(x, 
htonl(seq_bottom)); + if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO) + esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom)); + } if (sa_entry->esn_state.esn_msb) sa_entry->esn_state.esn = esn; @@ -724,6 +717,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, /* check esn */ if (x->props.flags & XFRM_STATE_ESN) mlx5e_ipsec_update_esn_state(sa_entry); + else + /* According to RFC4303, section "3.3.3. Sequence Number Generation", + * the first packet sent using a given SA will contain a sequence + * number of 1. + */ + sa_entry->esn_state.esn = 1; mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs); @@ -768,9 +767,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, MLX5_IPSEC_RESCHED); if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && - x->props.mode == XFRM_MODE_TUNNEL) - xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id, - MLX5E_IPSEC_TUNNEL_SA); + x->props.mode == XFRM_MODE_TUNNEL) { + xa_lock_bh(&ipsec->sadb); + __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id, + MLX5E_IPSEC_TUNNEL_SA); + xa_unlock_bh(&ipsec->sadb); + } out: x->xso.offload_handle = (unsigned long)sa_entry; @@ -797,7 +799,6 @@ err_xfrm: static void mlx5e_xfrm_del_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; struct mlx5e_ipsec *ipsec = sa_entry->ipsec; struct mlx5e_ipsec_sa_entry *old; @@ -806,12 +807,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id); WARN_ON(old != sa_entry); - - if (attrs->mode == XFRM_MODE_TUNNEL && - attrs->type == XFRM_DEV_OFFLOAD_PACKET) - /* Make sure that no ARP requests are running in parallel */ - flush_workqueue(ipsec->wq); - } static void mlx5e_xfrm_free_state(struct xfrm_state *x) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index e51b03d4c717..e7b64679f121 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.flags = FLOW_ACT_NO_APPEND; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; @@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, } sa_entry->ipsec_rule.trailer.fc = flow_counter; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { @@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.flags = FLOW_ACT_NO_APPEND; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; @@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; 
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); @@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, rx->ft.status = ft; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); + dest[1].counter = rx->fc->cnt; err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); if (err) goto err_add; @@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx->fc->cnt); + dest.counter = tx->fc->cnt; fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1); if (IS_ERR(fte)) { err = PTR_ERR(fte); @@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[0].ft = rx->ft.status; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(counter); + dest[1].counter = counter; rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) goto err_alloc; } - if (attrs->family == AF_INET) - setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); - else - setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); - setup_fte_no_frags(spec); setup_fte_upper_proto_match(spec, &attrs->upspec); switch (attrs->type) { case XFRM_DEV_OFFLOAD_CRYPTO: + if (attrs->family == AF_INET) + setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + else + setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); setup_fte_spi(spec, attrs->spi, false); setup_fte_esp(spec); setup_fte_reg_a(spec); break; case XFRM_DEV_OFFLOAD_PACKET: - if (attrs->reqid) - setup_fte_reg_c4(spec, attrs->reqid); + setup_fte_reg_c4(spec, attrs->reqid); err = setup_pkt_reformat(ipsec, attrs, &flow_act); if (err) goto err_pkt_reformat; @@ -1762,7 +1760,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) dest[0].ft = tx->ft.status; dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(counter); + dest[1].counter = counter; rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1835,7 +1833,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop); + dest[dstn].counter = tx->fc->drop; dstn++; break; default: @@ -1913,7 +1911,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) case XFRM_POLICY_BLOCK: flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop); + dest[dstn].counter = rx->fc->drop; dstn++; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 53cfa39188cb..820debf3fbbf 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, - struct mlx5_accel_esp_xfrm_attrs *attrs) + struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; void *aso_ctx; aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso); @@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, * active. */ MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5); - if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) { MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN); + if (!attrs->replay_esn.trigger) + MLX5_SET(ipsec_aso, aso_ctx, mode_parameter, + sa_entry->esn_state.esn); + } if (attrs->lft.hard_packet_limit != XFRM_INF) { MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt, @@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) res = &mdev->mlx5e_res.hw_objs; if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) - mlx5e_ipsec_packet_setup(obj, res->pdn, attrs); + mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index cc9bcc420032..6ab02f3fc291 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx, { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs; + const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc; struct mlx5_macsec_rule_attrs rule_attrs; union mlx5_macsec_rule *macsec_rule; + if (is_tx && tx_sc->encoding_sa != sa->assoc_num) + return 0; + rule_attrs.macsec_obj_id = sa->macsec_obj_id; rule_attrs.sci = sa->sci; rule_attrs.assoc_num = sa->assoc_num; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dd16d73000c3..a814b63ed97e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2087,7 +2087,7 @@ static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c, struct mlx5e_xdpsq *xdpsq; int err; - xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu); + xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu)); if (!xdpsq) return ERR_PTR(-ENOMEM); @@ -3946,6 +3946,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) } stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer; + stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards); stats->rx_length_errors = PPORT_802_3_GET(pstats, a_in_range_length_errors) + @@ -6542,8 +6543,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev) mlx5_core_uplink_netdev_set(mdev, NULL); mlx5e_dcbnl_delete_app(priv); - unregister_netdev(priv->netdev); - _mlx5e_suspend(adev, false); + /* When unload driver, the netdev is in registered state + * if it's from legacy mode. If from switchdev mode, it + * is already unregistered before changing to NIC profile. 
+ */ + if (priv->netdev->reg_state == NETREG_REGISTERED) { + unregister_netdev(priv->netdev); + _mlx5e_suspend(adev, false); + } else { + struct mlx5_core_dev *pos; + int i; + + if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5_sd_for_each_dev(i, mdev, pos) + mlx5e_destroy_mdev_resources(pos); + else + _mlx5e_suspend(adev, true); + } /* Avoid cleanup if profile rollback failed. */ if (priv->profile) priv->profile->cleanup(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 554f9cb5b53f..fdff9fd8a89e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv) priv = netdev_priv(netdev); + /* This bit is set when using devlink to change eswitch mode from + * switchdev to legacy. As need to keep uplink netdev ifindex, we + * detach uplink representor profile and attach NIC profile only. + * The netdev will be unregistered later when unload NIC auxiliary + * driver for this case. + * We explicitly block devlink eswitch mode change if any IPSec rules + * offloaded, but can't block other cases, such as driver unload + * and devlink reload. We have to unregister netdev before profile + * change for those cases. This is to avoid resource leak because + * the offloaded rules don't have the chance to be unoffloaded before + * cleanup which is triggered by detach uplink representor profile. + */ + if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY)) + unregister_netdev(netdev); + mlx5e_netdev_attach_nic_profile(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 6b3b1afe8312..9ba99609999f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dest_ix].counter_id = mlx5_fc_id(attr->counter); + dest[dest_ix].counter = attr->counter; dest_ix++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index 6b4c9ffad95b..7dd1dc3f77c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, if (drop_counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter); + drop_ctr_dst.counter = drop_counter; dst = &drop_ctr_dst; dest_num++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 093ed86a0acd..1c37098e09ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, if (counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(counter); + drop_ctr_dst.counter = counter; dst = &drop_ctr_dst; dest_num++; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index c5ea1d1d2b03..5f647358a05c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw) { @@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dests[0].ft = bridge->egress_ft; dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dests[1].counter_id = counter_id; + dests[1].counter = counter; handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests, ARRAY_SIZE(dests)); @@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge) { - return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter, bridge, bridge->br_offloads->esw); } static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge) { struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos; @@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, goto out; } - handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter, bridge, peer_esw); out: @@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow handle = peer ? 
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id, - addr, vlan, mlx5_fc_id(counter), - bridge) : + addr, vlan, counter, bridge) : mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, - mlx5_fc_id(counter), bridge); + counter, bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index 5a0047bdcb51..ed977ae75fab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) unsigned long i; int err; - xa_for_each(&esw->offloads.vport_reps, i, rep) { - rpriv = rep->rep_data[REP_ETH].priv; - if (!rpriv || !rpriv->netdev) + mlx5_esw_for_each_rep(esw, i, rep) { + if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) continue; + rpriv = rep->rep_data[REP_ETH].priv; rhashtable_walk_enter(&rpriv->tc_ht, &iter); rhashtable_walk_start(&iter); while ((flow = rhashtable_walk_next(&iter)) != NULL) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a83d41121db6..8573d36785f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\ (last) - 1) +#define mlx5_esw_for_each_rep(esw, i, rep) \ + xa_for_each(&((esw)->offloads.vport_reps), i, rep) + struct mlx5_eswitch *__must_check mlx5_devlink_eswitch_get(struct devlink *devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d5b42b3a19fd..20cc01ceee8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -53,9 +53,6 @@ #include "lag/lag.h" #include "en/tc/post_meter.h" -#define mlx5_esw_for_each_rep(esw, i, rep) \ - xa_for_each(&((esw)->offloads.vport_reps), i, rep) - /* There are two match-all miss flows, one for unicast dst mac and * one for multicast. 
*/ @@ -724,7 +721,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[i].counter_id = mlx5_fc_id(attr->counter); + dest[i].counter = attr->counter; i++; } @@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, esw->eswitch_operation_in_progress = true; up_write(&esw->mode_lock); + if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; mlx5_eswitch_disable_locked(esw); if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 676005854dad..ae20c061e0fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, int err; if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && - underlay_qpn == 0) + underlay_qpn == 0 && + (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX)) return 0; if (ft->type == FS_FT_FDB && @@ -718,7 +719,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, continue; MLX5_SET(flow_counter_list, in_dests, flow_counter_id, - dst->dest_attr.counter_id); + mlx5_fc_id(dst->dest_attr.counter)); in_dests += dst_cnt_size; list_size++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2eabfcc247c6..22dc23d991d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node) BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + mlx5_fc_local_destroy(rule->dest_attr.counter); goto out; } @@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) return index; fte->index = index + fg->start_index; +retry_insert: ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte); - if (ret) + if (ret) { + if (ret == -EBUSY) { + cond_resched(); + goto retry_insert; + } goto err_ida_remove; + } tree_add_node(&fte->node, &fg->node); list_add_tail(&fte->node.list, &fg->node.children); @@ -2709,6 +2716,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, break; case MLX5_FLOW_NAMESPACE_RDMA_TX: root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_BYPASS_PRIO; break; case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: root_ns = steering->rdma_rx_root_ns; @@ -3528,35 +3536,42 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id, { struct mlx5_core_dev *dev = devlink_priv(devlink); char *value = val.vstr; - int err = 0; + u8 eswitch_mode; - if (!strcmp(value, "dmfs")) { + if (!strcmp(value, "dmfs")) return 0; - } else if (!strcmp(value, "smfs")) { - u8 eswitch_mode; - bool smfs_cap; - eswitch_mode = mlx5_eswitch_mode(dev); - smfs_cap = mlx5_fs_dr_is_supported(dev); + if (!strcmp(value, "smfs")) { + bool smfs_cap = mlx5_fs_dr_is_supported(dev); if (!smfs_cap) { - err = -EOPNOTSUPP; NL_SET_ERR_MSG_MOD(extack, "Software managed steering is not supported by current device"); + return -EOPNOTSUPP; } + } else if (!strcmp(value, "hmfs")) { + bool hmfs_cap = mlx5_fs_hws_is_supported(dev); - else if 
(eswitch_mode == MLX5_ESWITCH_OFFLOADS) { + if (!hmfs_cap) { NL_SET_ERR_MSG_MOD(extack, - "Software managed steering is not supported when eswitch offloads enabled."); - err = -EOPNOTSUPP; + "Hardware steering is not supported by current device"); + return -EOPNOTSUPP; } } else { NL_SET_ERR_MSG_MOD(extack, - "Bad parameter: supported values are [\"dmfs\", \"smfs\"]"); - err = -EINVAL; + "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]"); + return -EINVAL; } - return err; + eswitch_mode = mlx5_eswitch_mode(dev); + if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Moving to %s is not supported when eswitch offloads enabled.", + value); + return -EOPNOTSUPP; + } + + return 0; } static int mlx5_fs_mode_set(struct devlink *devlink, u32 id, @@ -3568,6 +3583,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id, if (!strcmp(ctx->val.vstr, "smfs")) mode = MLX5_FLOW_STEERING_MODE_SMFS; + else if (!strcmp(ctx->val.vstr, "hmfs")) + mode = MLX5_FLOW_STEERING_MODE_HMFS; else mode = MLX5_FLOW_STEERING_MODE_DMFS; dev->priv.steering->mode = mode; @@ -3580,10 +3597,17 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id, { struct mlx5_core_dev *dev = devlink_priv(devlink); - if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) + switch (dev->priv.steering->mode) { + case MLX5_FLOW_STEERING_MODE_SMFS: strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr)); - else + break; + case MLX5_FLOW_STEERING_MODE_HMFS: + strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr)); + break; + default: strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr)); + } + return 0; } @@ -3658,8 +3682,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && - MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) { err = init_rdma_rx_root_ns(steering); if (err) goto err; @@ -4003,6 +4026,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, if (mode == MLX5_FLOW_STEERING_MODE_SMFS) cmds = mlx5_fs_cmd_get_dr_cmds(); + else if (mode == MLX5_FLOW_STEERING_MODE_HMFS) + cmds = mlx5_fs_cmd_get_hws_cmds(); else cmds = mlx5_fs_cmd_get_fw_cmds(); if (!cmds) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index bad2df0715ec..20837e526679 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -38,6 +38,7 @@ #include <linux/rhashtable.h> #include <linux/llist.h> #include <steering/sws/fs_dr.h> +#include <steering/hws/fs_hws.h> #define FDB_TC_MAX_CHAIN 3 #define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) @@ -64,6 +65,7 @@ struct mlx5_modify_hdr { enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action fs_dr_action; + struct mlx5_fs_hws_action fs_hws_action; u32 id; }; }; @@ -74,6 +76,7 @@ struct mlx5_pkt_reformat { enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action fs_dr_action; + struct mlx5_fs_hws_action fs_hws_action; u32 id; }; }; @@ -126,7 +129,8 @@ enum fs_fte_status { enum mlx5_flow_steering_mode { MLX5_FLOW_STEERING_MODE_DMFS, - MLX5_FLOW_STEERING_MODE_SMFS + MLX5_FLOW_STEERING_MODE_SMFS, + MLX5_FLOW_STEERING_MODE_HMFS, }; enum mlx5_flow_steering_capabilty { @@ -190,7 +194,10 @@ struct mlx5_flow_handle { /* Type of children is mlx5_flow_group */ struct mlx5_flow_table { struct fs_node node; - struct mlx5_fs_dr_table fs_dr_table; + union { + struct mlx5_fs_dr_table 
fs_dr_table; + struct mlx5_fs_hws_table fs_hws_table; + }; u32 id; u16 vport; unsigned int max_fte; @@ -247,7 +254,10 @@ struct fs_fte_dup { /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; - struct mlx5_fs_dr_rule fs_dr_rule; + union { + struct mlx5_fs_dr_rule fs_dr_rule; + struct mlx5_fs_hws_rule fs_hws_rule; + }; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; struct fs_fte_action act_dests; struct fs_fte_dup *dup; @@ -280,7 +290,10 @@ struct mlx5_flow_group_mask { /* Type of children is fs_fte */ struct mlx5_flow_group { struct fs_node node; - struct mlx5_fs_dr_matcher fs_dr_matcher; + union { + struct mlx5_fs_dr_matcher fs_dr_matcher; + struct mlx5_fs_hws_matcher fs_hws_matcher; + }; struct mlx5_flow_group_mask mask; u32 start_index; u32 max_ftes; @@ -293,7 +306,10 @@ struct mlx5_flow_group { struct mlx5_flow_root_namespace { struct mlx5_flow_namespace ns; enum mlx5_flow_steering_mode mode; - struct mlx5_fs_dr_domain fs_dr_domain; + union { + struct mlx5_fs_dr_domain fs_dr_domain; + struct mlx5_fs_hws_context fs_hws_context; + }; enum fs_flow_table_type table_type; struct mlx5_core_dev *dev; struct mlx5_flow_table *root_ft; @@ -303,6 +319,42 @@ struct mlx5_flow_root_namespace { const struct mlx5_flow_cmds *cmds; }; +enum mlx5_fc_type { + MLX5_FC_TYPE_ACQUIRED = 0, + MLX5_FC_TYPE_LOCAL, +}; + +struct mlx5_fc_cache { + u64 packets; + u64 bytes; + u64 lastuse; +}; + +struct mlx5_fc { + u32 id; + bool aging; + enum mlx5_fc_type type; + struct mlx5_fc_bulk *bulk; + struct mlx5_fc_cache cache; + /* last{packets,bytes} are used for calculating deltas since last reading. */ + u64 lastpackets; + u64 lastbytes; +}; + +struct mlx5_fc_bulk_hws_data { + struct mlx5hws_action *hws_action; + struct mutex lock; /* protects hws_action */ + refcount_t hws_action_refcount; +}; + +struct mlx5_fc_bulk { + struct mlx5_fs_bulk fs_bulk; + u32 base_id; + struct mlx5_fc_bulk_hws_data hws_data; + struct mlx5_fc fcs[]; +}; + +u32 mlx5_fc_get_base_id(struct mlx5_fc *counter); int mlx5_init_fc_stats(struct mlx5_core_dev *dev); void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev); void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 62d0c689796b..492775d3d193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -34,6 +34,7 @@ #include <linux/mlx5/fs.h> #include "mlx5_core.h" #include "fs_core.h" +#include "fs_pool.h" #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) @@ -43,33 +44,6 @@ #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 -struct mlx5_fc_cache { - u64 packets; - u64 bytes; - u64 lastuse; -}; - -struct mlx5_fc { - u32 id; - bool aging; - struct mlx5_fc_bulk *bulk; - struct mlx5_fc_cache cache; - /* last{packets,bytes} are used for calculating deltas since last reading. 
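/*
 * Editor's sketch, not part of the patch: with struct mlx5_fc now visible
 * in fs_core.h, a counter's slot inside its bulk can be derived from the
 * ids alone, which is what the reworked pool release path below relies on.
 * The example_* helper is hypothetical:
 */
static inline u32 example_fc_bulk_offset(const struct mlx5_fc *counter)
{
	/* base_id is the first HW counter id of the bulk allocation */
	return counter->id - counter->bulk->base_id;
}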
*/ - u64 lastpackets; - u64 lastbytes; -}; - -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; -}; - struct mlx5_fc_stats { struct xarray counters; @@ -80,13 +54,13 @@ struct mlx5_fc_stats { int bulk_query_len; bool bulk_query_alloc_failed; unsigned long next_bulk_query_alloc; - struct mlx5_fc_pool fc_pool; + struct mlx5_fs_pool fc_pool; }; -static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); -static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); -static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); -static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); +static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev); +static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool); +static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool); +static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc); static int get_init_bulk_query_len(struct mlx5_core_dev *dev) { @@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; + if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL)) + return; + if (counter->bulk) mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); else @@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, fc_stats->sampling_interval); } -/* Flow counter bluks */ - -struct mlx5_fc_bulk { - struct list_head pool_list; - u32 base_id; - int bulk_len; - unsigned long *bitmask; - struct mlx5_fc fcs[] __counted_by(bulk_len); -}; +/* Flow counter bulks */ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, u32 id) @@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, counter->id = id; } -static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) +u32 mlx5_fc_get_base_id(struct mlx5_fc *counter) { - return bitmap_weight(bulk->bitmask, bulk->bulk_len); + return counter->bulk->base_id; } -static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) +static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev, + void *pool_ctx) { enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask; - struct mlx5_fc_bulk *bulk; - int err = -ENOMEM; + struct mlx5_fc_bulk *fc_bulk; int bulk_len; u32 base_id; int i; @@ -469,207 +438,141 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); - if (!bulk) - goto err_alloc_bulk; - - bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), - GFP_KERNEL); - if (!bulk->bitmask) - goto err_alloc_bitmask; + fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL); + if (!fc_bulk) + return NULL; - err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id); - if (err) - goto err_mlx5_cmd_bulk_alloc; + if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len)) + goto fc_bulk_free; - bulk->base_id = base_id; - bulk->bulk_len = bulk_len; - for (i = 0; i < bulk_len; i++) { - mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i); - set_bit(i, bulk->bitmask); - } + if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id)) + goto fs_bulk_cleanup; + fc_bulk->base_id = base_id; + for (i = 0; i < bulk_len; i++) + mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i); - return bulk; + refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0); + mutex_init(&fc_bulk->hws_data.lock); + return &fc_bulk->fs_bulk; -err_mlx5_cmd_bulk_alloc: - kvfree(bulk->bitmask); -err_alloc_bitmask: - kvfree(bulk); -err_alloc_bulk: - return ERR_PTR(err); +fs_bulk_cleanup: + mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk); +fc_bulk_free: + kvfree(fc_bulk); + return NULL; } static int -mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) +mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk) { - if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) { + struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk, + struct mlx5_fc_bulk, + fs_bulk); + + if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) { mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); return -EBUSY; } - mlx5_cmd_fc_free(dev, bulk->base_id); - kvfree(bulk->bitmask); - kvfree(bulk); + mlx5_cmd_fc_free(dev, fc_bulk->base_id); + mlx5_fs_bulk_cleanup(fs_bulk); + kvfree(fc_bulk); return 0; } -static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) +static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool) { - int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len); - - if (free_fc_index >= bulk->bulk_len) - return ERR_PTR(-ENOSPC); - - clear_bit(free_fc_index, bulk->bitmask); - return &bulk->fcs[free_fc_index]; -} - -static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) -{ - int fc_index = fc->id - bulk->base_id; - - if (test_bit(fc_index, bulk->bitmask)) - return -EINVAL; - - set_bit(fc_index, bulk->bitmask); - return 0; + fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, + fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO); } /* Flow counters pool API */ -static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) -{ - fc_pool->dev = dev; - mutex_init(&fc_pool->pool_lock); - INIT_LIST_HEAD(&fc_pool->fully_used); - INIT_LIST_HEAD(&fc_pool->partially_used); - INIT_LIST_HEAD(&fc_pool->unused); - fc_pool->available_fcs = 0; - fc_pool->used_fcs = 0; - fc_pool->threshold = 0; -} +static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = { + .bulk_destroy = mlx5_fc_bulk_destroy, + .bulk_create = mlx5_fc_bulk_create, + .update_threshold = mlx5_fc_pool_update_threshold, +}; -static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool) +static void +mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev) { - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *bulk; - struct mlx5_fc_bulk 
*tmp; - - list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); - list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); - list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); + mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL); } -static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool) +static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool) { - fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, - fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO); + mlx5_fs_pool_cleanup(fc_pool); } -static struct mlx5_fc_bulk * -mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool) +static struct mlx5_fc * +mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool) { - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *new_bulk; + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_fc_bulk *fc_bulk; + int err; - new_bulk = mlx5_fc_bulk_create(dev); - if (!IS_ERR(new_bulk)) - fc_pool->available_fcs += new_bulk->bulk_len; - mlx5_fc_pool_update_threshold(fc_pool); - return new_bulk; + err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index); + if (err) + return ERR_PTR(err); + fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk); + return &fc_bulk->fcs[pool_index.index]; } static void -mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) +mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc) { + struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk; + struct mlx5_fs_pool_index pool_index = {}; struct mlx5_core_dev *dev = fc_pool->dev; - fc_pool->available_fcs -= bulk->bulk_len; - mlx5_fc_bulk_destroy(dev, bulk); - mlx5_fc_pool_update_threshold(fc_pool); + pool_index.fs_bulk = fs_bulk; + pool_index.index = fc->id - fc->bulk->base_id; + if (mlx5_fs_pool_release_index(fc_pool, &pool_index)) + mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); } -static struct mlx5_fc * -mlx5_fc_pool_acquire_from_list(struct list_head *src_list, - struct list_head *next_list, - bool move_non_full_bulk) +/** + * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which + * was already acquired using its counter id and bulk data. + * + * @counter_id: counter acquired counter id + * @offset: counter offset from bulk base + * @bulk_size: counter's bulk size as was allocated + * + * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise. 
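/*
 * Editor's sketch, not part of the kernel-doc above: minimal usage of the
 * new local-counter API, assuming a hypothetical caller that already knows
 * the HW counter id, its offset from the bulk base and the bulk size
 * (e.g. from an HWS-managed allocation). The example_* helper is
 * hypothetical; the counter is later freed with mlx5_fc_local_destroy():
 */
static struct mlx5_fc *example_wrap_local_counter(u32 hw_counter_id,
						  u32 offset, u32 bulk_size)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_local_create(hw_counter_id, offset, bulk_size);
	if (IS_ERR(counter))
		return counter;
	/* counter can now be used as a flow destination object */
	return counter;
}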
+ */ +struct mlx5_fc * +mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size) { - struct mlx5_fc_bulk *bulk; - struct mlx5_fc *fc; - - if (list_empty(src_list)) - return ERR_PTR(-ENODATA); - - bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list); - fc = mlx5_fc_bulk_acquire_fc(bulk); - if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0) - list_move(&bulk->pool_list, next_list); - return fc; -} + struct mlx5_fc_bulk *fc_bulk; + struct mlx5_fc *counter; -static struct mlx5_fc * -mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool) -{ - struct mlx5_fc_bulk *new_bulk; - struct mlx5_fc *fc; - - mutex_lock(&fc_pool->pool_lock); - - fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used, - &fc_pool->fully_used, false); - if (IS_ERR(fc)) - fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused, - &fc_pool->partially_used, - true); - if (IS_ERR(fc)) { - new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool); - if (IS_ERR(new_bulk)) { - fc = ERR_CAST(new_bulk); - goto out; - } - fc = mlx5_fc_bulk_acquire_fc(new_bulk); - list_add(&new_bulk->pool_list, &fc_pool->partially_used); + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL); + if (!fc_bulk) { + kfree(counter); + return ERR_PTR(-ENOMEM); } - fc_pool->available_fcs--; - fc_pool->used_fcs++; -out: - mutex_unlock(&fc_pool->pool_lock); - return fc; + counter->type = MLX5_FC_TYPE_LOCAL; + counter->id = counter_id; + fc_bulk->base_id = counter_id - offset; + fc_bulk->fs_bulk.bulk_len = bulk_size; + counter->bulk = fc_bulk; + return counter; } +EXPORT_SYMBOL(mlx5_fc_local_create); -static void -mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc) +void mlx5_fc_local_destroy(struct mlx5_fc *counter) { - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *bulk = fc->bulk; - int bulk_free_fcs_amount; - - mutex_lock(&fc_pool->pool_lock); - - if (mlx5_fc_bulk_release_fc(bulk, fc)) { - mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); - goto unlock; - } - - fc_pool->available_fcs++; - fc_pool->used_fcs--; - - bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk); - if (bulk_free_fcs_amount == 1) - list_move_tail(&bulk->pool_list, &fc_pool->partially_used); - if (bulk_free_fcs_amount == bulk->bulk_len) { - list_del(&bulk->pool_list); - if (fc_pool->available_fcs > fc_pool->threshold) - mlx5_fc_pool_free_bulk(fc_pool, bulk); - else - list_add(&bulk->pool_list, &fc_pool->unused); - } + if (!counter || counter->type != MLX5_FC_TYPE_LOCAL) + return; -unlock: - mutex_unlock(&fc_pool->pool_lock); + kfree(counter->bulk); + kfree(counter); } +EXPORT_SYMBOL(mlx5_fc_local_destroy); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c new file mode 100644 index 000000000000..f6c226664602 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include <mlx5_core.h> +#include "fs_pool.h" + +int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk, + int bulk_len) +{ + int i; + + fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); + if (!fs_bulk->bitmask) + return -ENOMEM; + + fs_bulk->bulk_len = bulk_len; + for (i = 0; i < bulk_len; i++) + set_bit(i, fs_bulk->bitmask); + + return 0; +} + +void 
mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk) +{ + kvfree(fs_bulk->bitmask); +} + +int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk) +{ + return bitmap_weight(bulk->bitmask, bulk->bulk_len); +} + +static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk, + struct mlx5_fs_pool_index *pool_index) +{ + int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len); + + WARN_ON_ONCE(!pool_index || !fs_bulk); + if (free_index >= fs_bulk->bulk_len) + return -ENOSPC; + + clear_bit(free_index, fs_bulk->bitmask); + pool_index->fs_bulk = fs_bulk; + pool_index->index = free_index; + return 0; +} + +static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index) +{ + if (test_bit(index, fs_bulk->bitmask)) + return -EINVAL; + + set_bit(index, fs_bulk->bitmask); + return 0; +} + +void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev, + const struct mlx5_fs_pool_ops *ops, void *pool_ctx) +{ + WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create || + !ops->update_threshold); + pool->dev = dev; + pool->pool_ctx = pool_ctx; + mutex_init(&pool->pool_lock); + INIT_LIST_HEAD(&pool->fully_used); + INIT_LIST_HEAD(&pool->partially_used); + INIT_LIST_HEAD(&pool->unused); + pool->available_units = 0; + pool->used_units = 0; + pool->threshold = 0; + pool->ops = ops; +} + +void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool) +{ + struct mlx5_core_dev *dev = pool->dev; + struct mlx5_fs_bulk *bulk; + struct mlx5_fs_bulk *tmp; + + list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list) + pool->ops->bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list) + pool->ops->bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list) + pool->ops->bulk_destroy(dev, bulk); +} + +static struct mlx5_fs_bulk * +mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool) +{ + struct mlx5_core_dev *dev = fs_pool->dev; + struct mlx5_fs_bulk *new_bulk; + + new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx); + if (new_bulk) + fs_pool->available_units += new_bulk->bulk_len; + fs_pool->ops->update_threshold(fs_pool); + return new_bulk; +} + +static void +mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk) +{ + struct mlx5_core_dev *dev = fs_pool->dev; + + fs_pool->available_units -= bulk->bulk_len; + fs_pool->ops->bulk_destroy(dev, bulk); + fs_pool->ops->update_threshold(fs_pool); +} + +static int +mlx5_fs_pool_acquire_from_list(struct list_head *src_list, + struct list_head *next_list, + bool move_non_full_bulk, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *fs_bulk; + int err; + + if (list_empty(src_list)) + return -ENODATA; + + fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list); + err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index); + if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0) + list_move(&fs_bulk->pool_list, next_list); + return err; +} + +int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *new_bulk; + int err; + + mutex_lock(&fs_pool->pool_lock); + + err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used, + &fs_pool->fully_used, false, + pool_index); + if (err) + err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused, + &fs_pool->partially_used, + true, pool_index); + if (err) { + new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool); + if (!new_bulk) { + err = -ENOENT; + goto out; + } + err = 
mlx5_fs_bulk_acquire_index(new_bulk, pool_index); + WARN_ON_ONCE(err); + list_add(&new_bulk->pool_list, &fs_pool->partially_used); + } + fs_pool->available_units--; + fs_pool->used_units++; + +out: + mutex_unlock(&fs_pool->pool_lock); + return err; +} + +int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *bulk = pool_index->fs_bulk; + int bulk_free_amount; + int err; + + mutex_lock(&fs_pool->pool_lock); + + /* TBD would rather return void if there was no warn here in original code */ + err = mlx5_fs_bulk_release_index(bulk, pool_index->index); + if (err) + goto unlock; + + fs_pool->available_units++; + fs_pool->used_units--; + + bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk); + if (bulk_free_amount == 1) + list_move_tail(&bulk->pool_list, &fs_pool->partially_used); + if (bulk_free_amount == bulk->bulk_len) { + list_del(&bulk->pool_list); + if (fs_pool->available_units > fs_pool->threshold) + mlx5_fs_pool_free_bulk(fs_pool, bulk); + else + list_add(&bulk->pool_list, &fs_pool->unused); + } + +unlock: + mutex_unlock(&fs_pool->pool_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h new file mode 100644 index 000000000000..f04ec3107498 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef __MLX5_FS_POOL_H__ +#define __MLX5_FS_POOL_H__ + +#include <linux/mlx5/driver.h> + +struct mlx5_fs_bulk { + struct list_head pool_list; + int bulk_len; + unsigned long *bitmask; +}; + +struct mlx5_fs_pool_index { + struct mlx5_fs_bulk *fs_bulk; + int index; +}; + +struct mlx5_fs_pool; + +struct mlx5_fs_pool_ops { + int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk); + struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev, + void *pool_ctx); + void (*update_threshold)(struct mlx5_fs_pool *pool); +}; + +struct mlx5_fs_pool { + struct mlx5_core_dev *dev; + void *pool_ctx; + const struct mlx5_fs_pool_ops *ops; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_units; + int used_units; + int threshold; +}; + +int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk, + int bulk_len); +void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk); +int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk); + +void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev, + const struct mlx5_fs_pool_ops *ops, void *pool_ctx); +void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool); +int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index); +int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index); + +#endif /* __MLX5_FS_POOL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 76ad46bf477d..b253d1673398 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -281,6 +281,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, shampo)) { + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + } + return 0; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index f4b777d4e108..62b6faa4276a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv) struct mlx5_lag *ldev; bool hash = false; bool lag_active; + int i, idx = 0; int num_ports; - int i; ldev = mlx5_lag_dev(dev); mutex_lock(&ldev->lock); lag_active = __mlx5_lag_is_active(ldev); if (lag_active) { if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { - mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports, + mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports, &num_ports); hash = true; } else { - for (i = 0; i < ldev->ports; i++) - ports[i] = ldev->v2p_map[i]; + mlx5_ldev_for_each(i, 0, ldev) + ports[idx++] = ldev->v2p_map[i]; num_ports = ldev->ports; } } @@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv) ldev = mlx5_lag_dev(dev); mutex_lock(&ldev->lock); - for (i = 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; + mlx5_ldev_for_each(i, 0, ldev) seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device)); - } mutex_unlock(&ldev->lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 7f68468c2e75..cea5aa314f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -43,10 +43,6 @@ #include "mp.h" #include "mpesw.h" -enum { - MLX5_LAG_EGRESS_PORT_1 = 1, - MLX5_LAG_EGRESS_PORT_2, -}; /* General purpose, use for short periods of time. * Beware of lock dependencies (preferably, no locks should be acquired @@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev) int num_enabled; int idx; - mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports, + mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports, &num_enabled); for (idx = 0; idx < num_enabled; idx++) active_port |= BIT_MASK(enabled_ports[idx]); @@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev) return active_port; } -static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, - unsigned long flags) +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, + int mode, unsigned long flags) { bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, &flags); int port_sel_mode = get_port_sel_mode(mode, flags); u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; + u8 *ports = ldev->v2p_map; + int idx0, idx1; void *lag_ctx; lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode); + idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0); + idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1); + + if (idx0 < 0 || idx1 < 0) + return -EINVAL; switch (port_sel_mode) { case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]); break; case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass)) @@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, return mlx5_cmd_exec_in(dev, 
create_lag, in); } -static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports, +static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, u8 *ports) { u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); + int idx0, idx1; + + idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0); + idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1); + if (idx0 < 0 || idx1 < 0) + return -EINVAL; MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); MLX5_SET(modify_lag_in, in, field_select, 0x1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]); return mlx5_cmd_exec_in(dev, modify_lag, in); } @@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); -static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, +static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_disabled) { int i; *num_disabled = 0; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) if (!tracker->netdev_state[i].tx_enabled || !tracker->netdev_state[i].link_up) ports[(*num_disabled)++] = i; - } } -void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_enabled) { int i; *num_enabled = 0; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) if (tracker->netdev_state[i].tx_enabled && tracker->netdev_state[i].link_up) ports[(*num_enabled)++] = i; - } if (*num_enabled == 0) - mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled); + mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled); } static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, @@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, int j; if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { - mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports, + mlx5_infer_tx_enabled(tracker, ldev, enabled_ports, &num_enabled); for (i = 0; i < num_enabled; i++) { err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1); @@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, buf[written - 2] = 0; mlx5_core_info(dev, "lag map active ports: %s\n", buf); } else { - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; err = scnprintf(buf + written, 10, @@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, { int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->pf[i].netdev == ndev) return i; return -ENOENT; } +int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq) +{ + int i, num = 0; + + if (!ldev) + return -ENOENT; + + mlx5_ldev_for_each(i, 0, ldev) { + if (num == seq) + return i; + num++; + } + return -ENOENT; +} + +int mlx5_lag_num_devs(struct mlx5_lag *ldev) +{ + int i, num = 0; + + if (!ldev) + return 0; + + mlx5_ldev_for_each(i, 0, ldev) { + (void)i; + num++; + } + return num; +} + +int mlx5_lag_num_netdevs(struct mlx5_lag *ldev) +{ + int i, num = 0; + + if (!ldev) + return 0; + + mlx5_ldev_for_each(i, 0, ldev) + if (ldev->pf[i].netdev) + num++; + return num; +} + static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev) { 
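/*
 * Editor's sketch, not part of the patch: the lag rework in this file stops
 * assuming that the pf[] slots 0..ports-1 are densely populated. Helpers
 * such as mlx5_lag_get_dev_index_by_seq() map a logical sequence number to
 * the n-th populated slot, e.g. resolving what used to be read directly as
 * pf[MLX5_LAG_P1]. The example_* helper is hypothetical:
 */
static struct mlx5_core_dev *example_get_first_dev(struct mlx5_lag *ldev)
{
	int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);

	return idx < 0 ? NULL : ldev->pf[idx].dev;
}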
return ldev->mode == MLX5_LAG_MODE_ROCE; @@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) * with mapping that points to active ports. */ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, - u8 num_ports, + struct mlx5_lag *ldev, u8 buckets, u8 *ports) { @@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, int i; int j; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (tracker->netdev_state[i].tx_enabled && tracker->netdev_state[i].link_up) enabled[enabled_ports_num++] = i; @@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, /* Use native mapping by default where each port's buckets * point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc */ - for (i = 0; i < num_ports; i++) + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < buckets; j++) { idx = i * buckets + j; - ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i; + ports[idx] = i + 1; } + } /* If all ports are disabled/enabled keep native mapping */ - if (enabled_ports_num == num_ports || - disabled_ports_num == num_ports) + if (enabled_ports_num == ldev->ports || + disabled_ports_num == ldev->ports) return; /* Go over the disabled ports and for each assign a random active port */ @@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->pf[i].has_drop) return true; return false; @@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (!ldev->pf[i].has_drop) continue; @@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, if (!ldev->tracker.has_inactive) return; - mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled); + mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled); for (i = 0; i < num_disabled; i++) { disabled_index = disabled_ports[i]; @@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports) static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; u8 active_ports; int ret; + if (idx < 0) + return -EINVAL; + + dev0 = ldev->pf[idx].dev; if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { ret = mlx5_lag_port_sel_modify(ldev, ports); if (ret || @@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) return mlx5_cmd_modify_active_port(dev0, active_ports); } - return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); + return mlx5_cmd_modify_lag(dev0, ldev, ports); } static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev) @@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev struct net_device *ndev = NULL; struct mlx5_lag *ldev; unsigned long flags; - int i; + int i, last_idx; spin_lock_irqsave(&lag_lock, flags); ldev = mlx5_lag_dev(dev); @@ -458,11 +513,15 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev if (!ldev) goto unlock; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->tracker.netdev_state[i].tx_enabled) ndev = ldev->pf[i].netdev; - if (!ndev) - ndev = ldev->pf[ldev->ports - 1].netdev; + if (!ndev) { + last_idx = 
mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1); + if (last_idx < 0) + goto unlock; + ndev = ldev->pf[last_idx].netdev; + } if (ndev) dev_hold(ndev); @@ -476,16 +535,21 @@ unlock: void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {}; - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev0; int idx; int err; int i; int j; - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports); + if (first_idx < 0) + return; + + dev0 = ldev->pf[first_idx].dev; + mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; if (ports[idx] == ldev->v2p_map[idx]) @@ -523,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, unsigned long *flags) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; + + if (first_idx < 0) + return -EINVAL; + dev0 = ldev->pf[first_idx].dev; if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) { if (ldev->ports > 2) return -EINVAL; @@ -544,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, unsigned long *flags) { - struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct lag_func *dev0; - if (mode == MLX5_LAG_MODE_MPESW) + if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW) return; + dev0 = &ldev->pf[first_idx]; if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) { if (ldev->ports > 2) @@ -593,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_eswitch *master_esw = dev0->priv.eswitch; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_eswitch *master_esw; + struct mlx5_core_dev *dev0; + int i, j; int err; - int i; - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) { + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; + master_esw = dev0->priv.eswitch; + mlx5_ldev_for_each(i, first_idx + 1, ldev) { struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch; err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw, @@ -608,9 +685,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) } return 0; err: - for (; i > MLX5_LAG_P1; i--) + mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev) mlx5_eswitch_offloads_single_fdb_del_one(master_esw, - ldev->pf[i].dev->priv.eswitch); + ldev->pf[j].dev->priv.eswitch); return err; } @@ -620,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, unsigned long flags) { bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; + struct mlx5_core_dev *dev0; int err; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; if (tracker) mlx5_lag_print_mapping(dev0, ldev, tracker, flags); 
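/*
 * Editor's sketch, not part of the patch: most loops in this file now use
 * mlx5_ldev_for_each(), which walks only the populated pf[] slots via
 * mlx5_get_next_ldev_func() (the macro is added in lag.h further below),
 * replacing the old "for (i = 0; i < ldev->ports; i++)" pattern. The
 * hypothetical example_* helper mirrors mlx5_lag_num_netdevs():
 */
static int example_count_netdevs(struct mlx5_lag *ldev)
{
	int i, n = 0;

	mlx5_ldev_for_each(i, 0, ldev)
		if (ldev->pf[i].netdev)
			n++;
	return n;
}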
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, mlx5_get_str_port_sel_mode(mode, flags)); - err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags); + err = mlx5_cmd_create_lag(dev0, ldev, mode, flags); if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", @@ -661,17 +743,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, bool shared_fdb) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); bool roce_lag = mode == MLX5_LAG_MODE_ROCE; - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev0; unsigned long flags = 0; int err; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags); if (err) return err; if (mode != MLX5_LAG_MODE_MPESW) { - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); + mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map); if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, ldev->v2p_map); @@ -709,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, int mlx5_deactivate_lag(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_eswitch *master_esw = dev0->priv.eswitch; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); unsigned long flags = ldev->mode_flags; + struct mlx5_eswitch *master_esw; + struct mlx5_core_dev *dev0; int err; int i; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; + master_esw = dev0->priv.eswitch; ldev->mode = MLX5_LAG_MODE_NONE; ldev->mode_flags = 0; mlx5_lag_mp_reset(ldev); if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) { - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) + mlx5_ldev_for_each(i, first_idx + 1, ldev) mlx5_eswitch_offloads_single_fdb_del_one(master_esw, ldev->pf[i].dev->priv.eswitch); clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); @@ -754,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev) bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); #ifdef CONFIG_MLX5_ESWITCH struct mlx5_core_dev *dev; u8 mode; @@ -761,30 +855,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) bool roce_support; int i; - for (i = 0; i < ldev->ports; i++) - if (!ldev->pf[i].dev) - return false; + if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports) + return false; #ifdef CONFIG_MLX5_ESWITCH - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev)) return false; } - dev = ldev->pf[MLX5_LAG_P1].dev; + dev = ldev->pf[first_idx].dev; mode = mlx5_eswitch_mode(dev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) return false; #else - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) return false; #endif - roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev); - for (i = 1; i < ldev->ports; i++) + roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev); + mlx5_ldev_for_each(i, first_idx + 1, ldev) if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support) return false; @@ -795,10 +888,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev) { int i; - for (i 
= 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; - + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) continue; @@ -812,10 +902,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; - + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) continue; @@ -828,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev) void mlx5_disable_lag(struct mlx5_lag *ldev) { bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; bool roce_lag; int err; int i; + if (idx < 0) + return; + + dev0 = ldev->pf[idx].dev; roce_lag = __mlx5_lag_is_roce(ldev); if (shared_fdb) { @@ -842,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); } - for (i = 1; i < ldev->ports; i++) + mlx5_ldev_for_each(i, idx + 1, ldev) mlx5_nic_vport_disable_roce(ldev->pf[i].dev); } @@ -854,17 +946,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) mlx5_lag_add_devices(ldev); if (shared_fdb) - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) { + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_core_dev *dev; int i; - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) { + if (idx < 0) + return false; + + mlx5_ldev_for_each(i, idx + 1, ldev) { dev = ldev->pf[i].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && @@ -876,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) return false; } - dev = ldev->pf[MLX5_LAG_P1].dev; + dev = ldev->pf[idx].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) && @@ -892,11 +988,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev) bool roce_lag = true; int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev); #ifdef CONFIG_MLX5_ESWITCH - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev); #endif @@ -917,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond) static void mlx5_do_bond(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct lag_tracker tracker = { }; + struct mlx5_core_dev *dev0; struct net_device *ndev; bool do_bond, roce_lag; int err; int i; + if (idx < 0) + return; + + dev0 = ldev->pf[idx].dev; if (!mlx5_lag_is_ready(ldev)) { do_bond = false; } else { @@ -956,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 1; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, idx + 1, ldev) { if (mlx5_get_roce_state(ldev->pf[i].dev)) mlx5_nic_vport_enable_roce(ldev->pf[i].dev); } @@ -966,7 +1067,7 @@ static void 
mlx5_do_bond(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) break; @@ -977,7 +1078,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_rescan_drivers_locked(dev0); mlx5_deactivate_lag(ldev); mlx5_lag_add_devices(ldev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_core_err(dev0, "Failed to enable lag\n"); return; @@ -1010,12 +1111,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev) int i; mutex_lock(&ldev->lock); - for (i = 0; i < ldev->ports; i++) { - if (ldev->pf[i].dev) { - devcom = ldev->pf[i].dev->priv.hca_devcom_comp; - break; - } - } + i = mlx5_get_next_ldev_func(ldev, 0); + if (i < MLX5_MAX_PORTS) + devcom = ldev->pf[i].dev->priv.hca_devcom_comp; mutex_unlock(&ldev->lock); return devcom; } @@ -1068,7 +1166,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, u8 bond_status = 0; int num_slaves = 0; int changed = 0; - int idx; + int i, idx = -1; if (!netif_is_lag_master(upper)) return 0; @@ -1083,8 +1181,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, */ rcu_read_lock(); for_each_netdev_in_bond_rcu(upper, ndev_tmp) { - idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); - if (idx >= 0) { + mlx5_ldev_for_each(i, 0, ldev) { + if (ldev->pf[i].netdev == ndev_tmp) { + idx++; + break; + } + } + if (i < MLX5_MAX_PORTS) { slave = bond_slave_get_rcu(ndev_tmp); if (slave) has_inactive |= bond_is_slave_inactive(slave); @@ -1234,15 +1337,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, } static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev, - struct mlx5_core_dev *dev, - struct net_device *netdev) + struct mlx5_core_dev *dev, + struct net_device *netdev) { unsigned int fn = mlx5_get_dev_index(dev); unsigned long flags; - if (fn >= ldev->ports) - return; - spin_lock_irqsave(&lag_lock, flags); ldev->pf[fn].netdev = netdev; ldev->tracker.netdev_state[fn].link_up = 0; @@ -1257,7 +1357,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, int i; spin_lock_irqsave(&lag_lock, flags); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].netdev == netdev) { ldev->pf[i].netdev = NULL; break; @@ -1267,13 +1367,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, } static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, - struct mlx5_core_dev *dev) + struct mlx5_core_dev *dev) { unsigned int fn = mlx5_get_dev_index(dev); - if (fn >= ldev->ports) - return; - ldev->pf[fn].dev = dev; dev->priv.lag = ldev; } @@ -1281,16 +1378,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, struct mlx5_core_dev *dev) { - int i; - - for (i = 0; i < ldev->ports; i++) - if (ldev->pf[i].dev == dev) - break; + int fn; - if (i == ldev->ports) + fn = mlx5_get_dev_index(dev); + if (ldev->pf[fn].dev != dev) return; - ldev->pf[i].dev = NULL; + ldev->pf[fn].dev = NULL; dev->priv.lag = NULL; } @@ -1398,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev) { struct mlx5_lag *ldev; - int i; + int num = 0; ldev = mlx5_lag_dev(dev); if (!ldev) @@ -1406,17 +1500,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, mutex_lock(&ldev->lock); mlx5_ldev_add_netdev(ldev, dev, netdev); - - for (i = 0; 
i < ldev->ports; i++) - if (!ldev->pf[i].netdev) - break; - - if (i >= ldev->ports) + num = mlx5_lag_num_netdevs(ldev); + if (num >= ldev->ports) set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); } +int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx) +{ + int i; + + for (i = start_idx; i >= end_idx; i--) + if (ldev->pf[i].dev) + return i; + return -1; +} + +int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx) +{ + int i; + + for (i = start_idx; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + return i; + return MLX5_MAX_PORTS; +} + bool mlx5_lag_is_roce(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; @@ -1467,12 +1577,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; unsigned long flags; - bool res; + bool res = false; + int idx; spin_lock_irqsave(&lag_lock, flags); ldev = mlx5_lag_dev(dev); - res = ldev && __mlx5_lag_is_active(ldev) && - dev == ldev->pf[MLX5_LAG_P1].dev; + idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev; spin_unlock_irqrestore(&lag_lock, flags); return res; @@ -1555,7 +1666,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, if (!(ldev && __mlx5_lag_is_roce(ldev))) goto unlock; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].netdev == slave) { port = i; break; @@ -1594,13 +1705,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int if (!ldev) goto unlock; - if (*i == ldev->ports) + if (*i == MLX5_MAX_PORTS) goto unlock; - for (idx = *i; idx < ldev->ports; idx++) + mlx5_ldev_for_each(idx, *i, ldev) if (ldev->pf[idx].dev != dev) break; - if (idx == ldev->ports) { + if (idx == MLX5_MAX_PORTS) { *i = idx; goto unlock; } @@ -1621,10 +1732,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, { int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); struct mlx5_core_dev **mdev; + int ret = 0, i, j, idx = 0; struct mlx5_lag *ldev; unsigned long flags; int num_ports; - int ret, i, j; void *out; out = kvzalloc(outlen, GFP_KERNEL); @@ -1643,8 +1754,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ldev = mlx5_lag_dev(dev); if (ldev && __mlx5_lag_is_active(ldev)) { num_ports = ldev->ports; - for (i = 0; i < ldev->ports; i++) - mdev[i] = ldev->pf[i].dev; + mlx5_ldev_for_each(i, 0, ldev) + mdev[idx++] = ldev->pf[i].dev; } else { num_ports = 1; mdev[MLX5_LAG_P1] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 50fcb1eee574..01cf72366947 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -103,7 +103,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags); -void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_enabled); void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev); @@ -119,9 +119,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS || MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) return 
false; return true; } +#define mlx5_ldev_for_each(i, start_index, ldev) \ + for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \ + i = tmp, tmp < MLX5_MAX_PORTS; tmp++) + +#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \ + for (int tmp = start_index, tmp1 = end_index; \ + tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \ + i = tmp, tmp >= tmp1; tmp--) + +int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx); +int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx); +int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq); +int mlx5_lag_num_devs(struct mlx5_lag *ldev); +int mlx5_lag_num_netdevs(struct mlx5_lag *ldev); #endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index b1aa494c76ba..aee17fcf3b36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) #define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) { - if (!mlx5_lag_is_ready(ldev)) + int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2); + + if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev)) return false; if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev)) @@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS) return false; - return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev, - ldev->pf[MLX5_LAG_P2].dev); + return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev, + ldev->pf[idx1].dev); } bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) @@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, enum mlx5_lag_port_affinity port) { + int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2); struct lag_tracker tracker = {}; - if (!__mlx5_lag_is_multipath(ldev)) + if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev)) return; switch (port) { case MLX5_LAG_NORMAL_AFFINITY: - tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P1].link_up = true; - tracker.netdev_state[MLX5_LAG_P2].link_up = true; + tracker.netdev_state[idx0].tx_enabled = true; + tracker.netdev_state[idx1].tx_enabled = true; + tracker.netdev_state[idx0].link_up = true; + tracker.netdev_state[idx1].link_up = true; break; case MLX5_LAG_P1_AFFINITY: - tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P1].link_up = true; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false; - tracker.netdev_state[MLX5_LAG_P2].link_up = false; + tracker.netdev_state[idx0].tx_enabled = true; + tracker.netdev_state[idx0].link_up = true; + tracker.netdev_state[idx1].tx_enabled = false; + tracker.netdev_state[idx1].link_up = false; break; case MLX5_LAG_P2_AFFINITY: - tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false; - tracker.netdev_state[MLX5_LAG_P1].link_up = false; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P2].link_up = true; + tracker.netdev_state[idx0].tx_enabled = false; + tracker.netdev_state[idx0].link_up = false; + 
tracker.netdev_state[idx1].tx_enabled = true; + tracker.netdev_state[idx1].link_up = true; break; default: - mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, + mlx5_core_warn(ldev->pf[idx0].dev, "Invalid affinity port %d", port); return; } - if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled) - mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events, + if (tracker.netdev_state[idx0].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events, MLX5_DEV_EVENT_PORT_AFFINITY, (void *)0); - if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled) - mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events, + if (tracker.netdev_state[idx1].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events, MLX5_DEV_EVENT_PORT_AFFINITY, (void *)0); @@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev, static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, struct fib_entry_notifier_info *fen_info) { + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct net_device *nh_dev0, *nh_dev1; struct fib_info *fi = fen_info->fi; struct lag_mp *mp = &ldev->lag_mp; + int i, dev_idx = 0; + + if (idx < 0) + return; /* Handle delete event */ if (event == FIB_EVENT_ENTRY_DEL) { @@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, } if (nh_dev0 == nh_dev1) { - mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, + mlx5_core_warn(ldev->pf[idx].dev, "Multipath offload doesn't support routes with multiple nexthops of the same device"); return; } if (!nh_dev1) { if (__mlx5_lag_is_active(ldev)) { - int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0); - - i++; - mlx5_lag_set_port_affinity(ldev, i); + mlx5_ldev_for_each(i, 0, ldev) { + dev_idx++; + if (ldev->pf[i].netdev == nh_dev0) + break; + } + mlx5_lag_set_port_affinity(ldev, dev_idx); mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); } @@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, struct fib_info *fi) { struct lag_mp *mp = &ldev->lag_mp; + int i, dev_idx = 0; /* Check the nh event is related to the route */ if (!mp->fib.mfi || mp->fib.mfi != fi) @@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, /* nh added/removed */ if (event == FIB_EVENT_NH_DEL) { - int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev); + mlx5_ldev_for_each(i, 0, ldev) { + if (ldev->pf[i].netdev == fib_nh->fib_nh_dev) + break; + dev_idx++; + } - if (i >= 0) { - i = (i + 1) % 2 + 1; /* peer port */ - mlx5_lag_set_port_affinity(ldev, i); + if (dev_idx >= 0) { + dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */ + mlx5_lag_set_port_affinity(ldev, dev_idx); } } else if (event == FIB_EVENT_NH_ADD && fib_info_num_path(fi) == 2) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 571ea26edd0c..ffac0bd6c895 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev) u32 pf_metadata; int i; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; esw = dev->priv.eswitch; pf_metadata = ldev->lag_mpesw.pf_metadata[i]; @@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev) u32 pf_metadata; int i, err; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; esw = dev->priv.eswitch; pf_metadata = 
mlx5_esw_match_metadata_alloc(esw); @@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev) goto err_metadata; } - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW, (void *)0); @@ -68,13 +68,15 @@ err_metadata: #define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4 static int enable_mpesw(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; int err; int i; - if (ldev->mode != MLX5_LAG_MODE_NONE) + if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE) return -EINVAL; + dev0 = ldev->pf[idx].dev; if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS) return -EOPNOTSUPP; @@ -98,7 +100,7 @@ static int enable_mpesw(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) goto err_rescan_drivers; @@ -112,7 +114,7 @@ err_rescan_drivers: mlx5_deactivate_lag(ldev); err_add_devices: mlx5_lag_add_devices(ldev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index ab2717012b79..bde79cac33a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer, u8 *ports) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_namespace *ns; - int err, i; - int idx; - int j; + struct mlx5_core_dev *dev; + int err, i, j, k, idx; + if (first_idx < 0) + return -EINVAL; + + dev = ldev->pf[first_idx].dev; ft_attr.max_fte = ldev->ports * ldev->buckets; ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; @@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.flags |= FLOW_ACT_NO_APPEND; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { u8 affinity; @@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, &dest, 1); if (IS_ERR(lag_definer->rules[idx])) { err = PTR_ERR(lag_definer->rules[idx]); - do { + mlx5_ldev_for_each_reverse(k, i, 0, ldev) { while (j--) { - idx = i * ldev->buckets + j; + idx = k * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); } j = ldev->buckets; - } while (i--); + }; goto destroy_fg; } } @@ -295,11 +298,16 @@ static struct mlx5_lag_definer * mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, enum mlx5_traffic_types tt, bool tunnel, u8 *ports) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_definer *lag_definer; + struct mlx5_core_dev *dev; u32 *match_definer_mask; int 
format_id, err; + if (first_idx < 0) + return ERR_PTR(-EINVAL); + + dev = ldev->pf[first_idx].dev; lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL); if (!lag_definer) return ERR_PTR(-ENOMEM); @@ -341,12 +349,15 @@ free_lag_definer: static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; - int idx; - int i; - int j; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev; + int idx, i, j; - for (i = 0; i < ldev->ports; i++) { + if (first_idx < 0) + return; + + dev = ldev->pf[first_idx].dev; + mlx5_ldev_for_each(i, first_idx, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); @@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev, static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct ttc_params ttc_params = {}; + struct mlx5_core_dev *dev; + + if (first_idx < 0) + return -EINVAL; + dev = ldev->pf[first_idx].dev; mlx5_lag_set_outer_ttc_params(ldev, &ttc_params); port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params); return PTR_ERR_OR_ZERO(port_sel->outer.ttc); @@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct ttc_params ttc_params = {}; + struct mlx5_core_dev *dev; + if (first_idx < 0) + return -EINVAL; + + dev = ldev->pf[first_idx].dev; mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); return PTR_ERR_OR_ZERO(port_sel->inner.ttc); @@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, set_tt_map(port_sel, hash_type); err = mlx5_lag_create_definers(ldev, hash_type, ports); if (err) - return err; + goto clear_port_sel; if (port_sel->tunnel) { err = mlx5_lag_create_inner_ttc_table(ldev); @@ -549,6 +570,8 @@ destroy_inner: mlx5_destroy_ttc_table(port_sel->inner.ttc); destroy_definers: mlx5_lag_destroy_definers(ldev); +clear_port_sel: + memset(port_sel, 0, sizeof(*port_sel)); return err; } @@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; if (ldev->v2p_map[idx] == ports[idx]) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 4822d01123b4..d61a1a9297c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work) } } -static void mlx5_timestamp_overflow(struct work_struct *work) +static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info) { - struct delayed_work *dwork = to_delayed_work(work); struct mlx5_core_dev *mdev; struct mlx5_timer *timer; struct mlx5_clock *clock; unsigned long flags; - timer = 
container_of(dwork, struct mlx5_timer, overflow_work); - clock = container_of(timer, struct mlx5_clock, timer); + clock = container_of(ptp_info, struct mlx5_clock, ptp_info); mdev = container_of(clock, struct mlx5_core_dev, clock); + timer = &clock->timer; if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; @@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work) write_sequnlock_irqrestore(&clock->lock, flags); out: - schedule_delayed_work(&timer->overflow_work, timer->overflow_period); + return timer->overflow_period; } static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev, @@ -517,6 +516,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) timer->cycles.mult = mult; mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); + ptp_schedule_worker(clock->ptp, timer->overflow_period); return 0; } @@ -852,6 +852,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .settime64 = mlx5_ptp_settime, .enable = NULL, .verify = NULL, + .do_aux_work = mlx5_timestamp_overflow, }; static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, @@ -1052,12 +1053,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock) do_div(ns, NSEC_PER_SEC / HZ); timer->overflow_period = ns; - INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow); - if (timer->overflow_period) - schedule_delayed_work(&timer->overflow_work, 0); - else + if (!timer->overflow_period) { + timer->overflow_period = HZ; mlx5_core_warn(mdev, - "invalid overflow period, overflow_work is not scheduled\n"); + "invalid overflow period, overflow_work is scheduled once per second\n"); + } if (clock_info) clock_info->overflow_period = timer->overflow_period; @@ -1172,6 +1172,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); mlx5_eq_notifier_register(mdev, &clock->pps_nb); + + if (clock->ptp) + ptp_schedule_worker(clock->ptp, 0); } void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) @@ -1188,7 +1191,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) } cancel_work_sync(&clock->pps_info.out_work); - cancel_delayed_work_sync(&clock->timer.overflow_work); if (mdev->clock_info) { free_page((unsigned long)mdev->clock_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c index 4a078113e292..762d55ba9e51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c @@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) memset(&dest, 0, sizeof(struct mlx5_flow_destination)); memset(&flow_act, 0, sizeof(flow_act)); dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter); + dest.counter = tx_tables->check_miss_rule_counter; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { @@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) flow_act.flags = FLOW_ACT_NO_APPEND; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); + dest.counter = tx_tables->check_rule_counter; rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, 
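The clock.c hunks above convert the timestamp-overflow handler from a self-rescheduling delayed work into a PTP auxiliary worker: the callback now returns the delay until its next run and the PTP core does the rescheduling, with ptp_schedule_worker() used only to (re)arm it. A rough userspace analogue of that "callback returns its own next period" shape, with made-up names:

/* Hypothetical analogue only; the real driver hands this callback to the PTP core. */
#include <stdio.h>
#include <unistd.h>

struct clock_state {
	unsigned int overflow_period;   /* seconds until the next run */
	unsigned long reads;
};

/* Do one unit of periodic work and report when to run again. */
static long overflow_work(struct clock_state *cs)
{
	cs->reads++;                    /* stands in for reading the cycle counter */
	return cs->overflow_period;     /* next delay, instead of rescheduling itself */
}

int main(void)
{
	struct clock_state cs = { .overflow_period = 1 };

	for (int i = 0; i < 3; i++) {   /* stands in for the worker thread */
		long next = overflow_work(&cs);
		printf("ran, sleeping %lds\n", next);
		sleep(next);
	}
	return 0;
}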
&dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | MLX5_FLOW_CONTEXT_ACTION_COUNT; roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter); + roce_dest[dstn].counter = rx_tables->check_rule_counter; rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1); if (IS_ERR(rule)) { @@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs) memset(&flow_act, 0, sizeof(flow_act)); dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); + dest.counter = rx_tables->check_miss_rule_counter; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 220a9ac75c8b..ec956c4bcebd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; + if (WARN_ON(!dev->caps.hca[cap_type])) + /* this cap_type must be added to mlx5_hca_caps_alloc() */ + return -EINVAL; + memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) @@ -664,6 +668,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list, ilog2(max_uc_list)); + /* enable absolute native port num */ + if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num)) + MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } @@ -941,9 +949,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, mlx5_pci_vsc_init(dev); - err = pci_enable_ptm(pdev, NULL); - if (err) - mlx5_core_info(dev, "PTM is not supported by PCIe\n"); + pci_enable_ptm(pdev, NULL); return 0; @@ -1788,6 +1794,7 @@ static const int types[] = { MLX5_CAP_MACSEC, MLX5_CAP_ADV_VIRTUALIZATION, MLX5_CAP_CRYPTO, + MLX5_CAP_SHAMPO, }; static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index e393391966e0..39a209b9b684 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN; case TSAR_ELEMENT_TSAR_TYPE_ETS: return cap & TSAR_TYPE_CAP_MASK_ETS; + case TSAR_ELEMENT_TSAR_TYPE_TC_ARB: + return cap & TSAR_TYPE_CAP_MASK_TC_ARB; } return false; @@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP: return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT: + return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT; } return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 
a96be98be032..b96909fbeb12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, return 0; esw_err: + mlx5_sf_function_id_erase(table, sf); mlx5_sf_free(table, sf); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c index a897cdc60fdb..b5332c54d4fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c @@ -11,31 +11,29 @@ /* This is the longest supported action sequence for FDB table: * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term. */ -static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = { - [MLX5HWS_TABLE_TYPE_FDB] = { - BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) | - BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) | - BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2), - BIT(MLX5HWS_ACTION_TYP_POP_VLAN), - BIT(MLX5HWS_ACTION_TYP_POP_VLAN), - BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), - BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), - BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), - BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) | - BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) | - BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3), - BIT(MLX5HWS_ACTION_TYP_CTR), - BIT(MLX5HWS_ACTION_TYP_TAG), - BIT(MLX5HWS_ACTION_TYP_ASO_METER), - BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), - BIT(MLX5HWS_ACTION_TYP_TBL) | - BIT(MLX5HWS_ACTION_TYP_VPORT) | - BIT(MLX5HWS_ACTION_TYP_DROP) | - BIT(MLX5HWS_ACTION_TYP_SAMPLER) | - BIT(MLX5HWS_ACTION_TYP_RANGE) | - BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY), - BIT(MLX5HWS_ACTION_TYP_LAST), - }, +static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = { + BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3), + BIT(MLX5HWS_ACTION_TYP_CTR), + BIT(MLX5HWS_ACTION_TYP_TAG), + BIT(MLX5HWS_ACTION_TYP_ASO_METER), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_TBL) | + BIT(MLX5HWS_ACTION_TYP_VPORT) | + BIT(MLX5HWS_ACTION_TYP_DROP) | + BIT(MLX5HWS_ACTION_TYP_SAMPLER) | + BIT(MLX5HWS_ACTION_TYP_RANGE) | + BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY), + BIT(MLX5HWS_ACTION_TYP_LAST), }; static const char * const mlx5hws_action_type_str[] = { @@ -83,8 +81,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, int ret; mutex_lock(&ctx->ctrl_lock); - if (ctx->common_res[tbl_type].shared_stc[stc_type]) { - ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++; + if (ctx->common_res.shared_stc[stc_type]) { + ctx->common_res.shared_stc[stc_type]->refcount++; mutex_unlock(&ctx->ctrl_lock); return 0; } @@ -124,8 +122,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, goto free_shared_stc; } - ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc; - ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1; + ctx->common_res.shared_stc[stc_type] = shared_stc; + ctx->common_res.shared_stc[stc_type]->refcount = 1; mutex_unlock(&ctx->ctrl_lock); @@ -178,16 +176,16 @@ static void 
hws_action_put_shared_stc(struct mlx5hws_action *action, } mutex_lock(&ctx->ctrl_lock); - if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) { + if (--ctx->common_res.shared_stc[stc_type]->refcount) { mutex_unlock(&ctx->ctrl_lock); return; } - shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type]; + shared_stc = ctx->common_res.shared_stc[stc_type]; mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk); kfree(shared_stc); - ctx->common_res[tbl_type].shared_stc[stc_type] = NULL; + ctx->common_res.shared_stc[stc_type] = NULL; mutex_unlock(&ctx->ctrl_lock); } @@ -206,10 +204,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx, enum mlx5hws_action_type *user_actions, enum mlx5hws_table_type table_type) { - const u32 *order_arr = action_order_arr[table_type]; + const u32 *order_arr = action_order_arr; + bool valid_combo; u8 order_idx = 0; u8 user_idx = 0; - bool valid_combo; if (table_type >= MLX5HWS_TABLE_TYPE_MAX) { mlx5hws_err(ctx, "Invalid table_type %d", table_type); @@ -321,8 +319,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx, __must_hold(&ctx->ctrl_lock) { struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0}; - struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0}; + struct mlx5hws_pool *stc_pool = ctx->stc_pool; bool use_fixup; u32 obj_0_id; int ret; @@ -387,8 +385,8 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx, struct mlx5hws_pool_chunk *stc) __must_hold(&ctx->ctrl_lock) { - struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_pool *stc_pool = ctx->stc_pool; u32 obj_id; /* Modify the STC not to point to an object */ @@ -473,6 +471,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action, break; case MLX5HWS_ACTION_TYP_TBL: case MLX5HWS_ACTION_TYP_DEST_ARRAY: + case MLX5HWS_ACTION_TYP_SAMPLER: attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; attr->dest_table_id = obj_id; @@ -561,7 +560,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id) if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) { ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB, - &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + &action->stc); if (ret) goto out_err; } @@ -585,7 +584,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action) if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB, - &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + &action->stc); mutex_unlock(&ctx->ctrl_lock); } @@ -1639,8 +1638,8 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false); /* STC is a single resource (obj_id), use any STC for the ID */ - stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB]; - default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc; + stc_pool = ctx->stc_pool; + default_stc = ctx->common_res.default_stc; obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); rtc_attr.stc_base = obj_id; @@ -1731,7 +1730,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx, ste_attr.used_id_rtc_0 = &used_rtc_0_id; ste_attr.used_id_rtc_1 = &used_rtc_1_id; - common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB]; + common_res = &ctx->common_res; /* init an empty match STE which will always hit */ 
ste_attr.wqe_ctrl = &wqe_ctrl; @@ -1750,7 +1749,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx, wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |= htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29); wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = - htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset); + htonl(hit_ft_action->stc.offset); wqe_data_arr = (__force __be32 *)&range_wqe_data; @@ -1843,7 +1842,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx, stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB, - &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + &action->stc); if (ret) goto error_unlock; @@ -1875,7 +1874,50 @@ struct mlx5hws_action * mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx, u32 sampler_id, u32 flags) { - mlx5hws_err(ctx, "Flow sampler action - unsupported\n"); + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; + struct mlx5hws_cmd_forward_tbl *fw_island; + struct mlx5hws_cmd_set_fte_dest dest; + struct mlx5hws_action *action; + int ret; + + if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) { + mlx5hws_err(ctx, "Unsupported flags for flow sampler\n"); + return NULL; + } + + ft_attr.type = FS_FT_FDB; + ft_attr.level = ctx->caps->fdb_ft.max_level - 1; + + dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; + dest.destination_id = sampler_id; + + fte_attr.dests_num = 1; + fte_attr.dests = &dest; + fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_attr.ignore_flow_level = 1; + + fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr); + if (!fw_island) + return NULL; + + action = hws_action_create_generic(ctx, flags, + MLX5HWS_ACTION_TYP_SAMPLER); + if (!action) + goto destroy_fw_island; + + ret = hws_action_create_stcs(action, fw_island->ft_id); + if (ret) + goto free_action; + + action->flow_sampler.fw_island = fw_island; + + return action; + +free_action: + kfree(action); +destroy_fw_island: + mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island); return NULL; } @@ -1914,6 +1956,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action) } kfree(action->dest_array.dest_list); break; + case MLX5HWS_ACTION_TYP_SAMPLER: + hws_action_destroy_stcs(action); + mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, + action->flow_sampler.fw_island); + break; case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: case MLX5HWS_ACTION_TYP_MODIFY_HDR: shared_arg = false; @@ -1970,8 +2017,8 @@ __must_hold(&ctx->ctrl_lock) struct mlx5hws_action_default_stc *default_stc; int ret; - if (ctx->common_res[tbl_type].default_stc) { - ctx->common_res[tbl_type].default_stc->refcount++; + if (ctx->common_res.default_stc) { + ctx->common_res.default_stc->refcount++; return 0; } @@ -2023,8 +2070,8 @@ __must_hold(&ctx->ctrl_lock) goto free_nop_dw7; } - ctx->common_res[tbl_type].default_stc = default_stc; - ctx->common_res[tbl_type].default_stc->refcount++; + ctx->common_res.default_stc = default_stc; + ctx->common_res.default_stc->refcount++; return 0; @@ -2046,9 +2093,7 @@ __must_hold(&ctx->ctrl_lock) { struct mlx5hws_action_default_stc *default_stc; - default_stc = ctx->common_res[tbl_type].default_stc; - - default_stc = ctx->common_res[tbl_type].default_stc; + default_stc = ctx->common_res.default_stc; if (--default_stc->refcount) return; @@ -2058,7 +2103,7 @@ __must_hold(&ctx->ctrl_lock) mlx5hws_action_free_single_stc(ctx, 
tbl_type, &default_stc->nop_dw5); mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr); kfree(default_stc); - ctx->common_res[tbl_type].default_stc = NULL; + ctx->common_res.default_stc = NULL; } static void hws_action_modify_write(struct mlx5hws_send_engine *queue, @@ -2150,8 +2195,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply, { struct mlx5hws_action *action = apply->rule_action[action_idx].action; - apply->wqe_ctrl->stc_ix[stc_idx] = - htonl(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset); } static void @@ -2181,7 +2225,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, rule_action = &apply->rule_action[setter->idx_double]; action = rule_action->action; - stc_idx = htonl(action->stc[apply->tbl_type].offset); + stc_idx = htonl(action->stc.offset); apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; @@ -2240,7 +2284,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply, apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); - stc_idx = htonl(action->stc[apply->tbl_type].offset); + stc_idx = htonl(action->stc.offset); apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; @@ -2272,7 +2316,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply, apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); - stc_idx = htonl(action->stc[apply->tbl_type].offset); + stc_idx = htonl(action->stc.offset); apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; @@ -2434,6 +2478,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at) case MLX5HWS_ACTION_TYP_DROP: case MLX5HWS_ACTION_TYP_TBL: case MLX5HWS_ACTION_TYP_DEST_ARRAY: + case MLX5HWS_ACTION_TYP_SAMPLER: case MLX5HWS_ACTION_TYP_VPORT: case MLX5HWS_ACTION_TYP_MISS: /* Hit action */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h index e8f562c31826..64b76075f7f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h @@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc { struct mlx5hws_pool_chunk nop_dw6; struct mlx5hws_pool_chunk nop_dw7; struct mlx5hws_pool_chunk default_hit; - u32 refcount; + u32 refcount; /* protected by context ctrl lock */ }; struct mlx5hws_action_shared_stc { struct mlx5hws_pool_chunk stc_chunk; - u32 refcount; + u32 refcount; /* protected by context ctrl lock */ }; struct mlx5hws_actions_apply_data { @@ -124,7 +124,7 @@ struct mlx5hws_action { struct mlx5hws_context *ctx; union { struct { - struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_pool_chunk stc; union { struct { u32 pat_id; @@ -166,6 +166,9 @@ struct mlx5hws_action { struct mlx5hws_cmd_set_fte_dest *dest_list; } dest_array; struct { + struct mlx5hws_cmd_forward_tbl *fw_island; + } flow_sampler; + struct { u8 type; u8 start_anchor; u8 end_anchor; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index baacf662c0ab..3dbd4efa21a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -152,6 +152,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, if (!bwc_matcher) return NULL; + atomic_set(&bwc_matcher->num_of_rules, 0); + /* Check if the required match params can be all matched * in single STE, otherwise complex matcher is needed. */ @@ -199,10 +201,12 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher) int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) { - if (bwc_matcher->num_of_rules) + u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules); + + if (num_of_rules) mlx5hws_err(bwc_matcher->matcher->tbl->ctx, "BWC matcher destroy: matcher still has %d rules\n", - bwc_matcher->num_of_rules); + num_of_rules); mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); @@ -215,6 +219,8 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, u32 *pending_rules, bool drain) { + unsigned long timeout = jiffies + + msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC); struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH]; u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id); bool got_comp = *pending_rules >= burst_th; @@ -250,6 +256,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, } got_comp = !!ret; + + if (unlikely(!got_comp && time_after(jiffies, timeout))) { + mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id); + return -ETIMEDOUT; + } } return err; @@ -309,7 +320,7 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx) { struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - bwc_matcher->num_of_rules++; + atomic_inc(&bwc_matcher->num_of_rules); bwc_rule->bwc_queue_idx = idx; list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]); } @@ -318,7 +329,7 @@ static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule) { struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - bwc_matcher->num_of_rules--; + atomic_dec(&bwc_matcher->num_of_rules); list_del_init(&bwc_rule->list_node); } @@ -334,22 +345,21 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule, struct mlx5hws_rule_attr *rule_attr) { struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; - struct mlx5hws_flow_op_result completion; + u32 expected_completions = 1; int ret; ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr); if (unlikely(ret)) return ret; - do { - ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1); - } while (ret != 1); - - if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS || - (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED && - bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) { - mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n", - completion.status, bwc_rule->rule->status); + ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + if (unlikely(ret)) + return ret; + + if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED && + bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) { + mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n", + bwc_rule->rule->status); return -EINVAL; } @@ -458,8 +468,22 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher) { struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps; - return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >= - caps->ste_alloc_log_max - 1; + /* check the match RTC size */ + if 
((bwc_matcher->size_log + + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH + + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) > + (caps->ste_alloc_log_max - 1)) + return true; + + /* check the action RTC size */ + if ((bwc_matcher->size_log + + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP + + ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) + + MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) > + (caps->ste_alloc_log_max - 1)) + return true; + + return false; } static bool @@ -615,8 +639,12 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id, &pending_rules[i], false); - if (unlikely(ret)) + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Moving BWC rule failed during rehash (%d)\n", + ret); goto free_bwc_rules; + } } } } while (!all_done); @@ -629,8 +657,11 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]); ret = hws_bwc_queue_poll(ctx, queue_id, &pending_rules[i], true); - if (unlikely(ret)) + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Moving BWC rule failed during rehash (%d)\n", ret); goto free_bwc_rules; + } } } @@ -704,7 +735,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) * Need to check again if we really need rehash. * If the reason for rehash was size, but not any more - skip rehash. */ - if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules)) + if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, + atomic_read(&bwc_matcher->num_of_rules))) return 0; /* Now we're done all the checking - do the rehash: @@ -797,7 +829,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, } /* check if number of rules require rehash */ - num_of_rules = bwc_matcher->num_of_rules; + num_of_rules = atomic_read(&bwc_matcher->num_of_rules); if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) { mutex_unlock(queue_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h index 0b745968e21e..f9f569131dde 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h @@ -8,10 +8,18 @@ #define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1 #define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70 #define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32 -#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255 + +/* Max number of AT attach operations for the same matcher. + * When the limit is reached, next attempt to attach new AT + * will result in creation of a new matcher and moving all + * the rules to this matcher. + */ +#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8 #define MLX5HWS_BWC_MAX_ACTS 16 +#define MLX5HWS_BWC_POLLING_TIMEOUT 60 + struct mlx5hws_bwc_matcher { struct mlx5hws_matcher *matcher; struct mlx5hws_match_template *mt; @@ -19,7 +27,7 @@ struct mlx5hws_bwc_matcher { u8 num_of_at; u16 priority; u8 size_log; - u32 num_of_rules; /* atomically accessed */ + atomic_t num_of_rules; struct list_head *rules; }; @@ -60,9 +68,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher, static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx) { /* Besides the control queue, half of the queues are - * reguler HWS queues, and the other half are BWC queues. + * regular HWS queues, and the other half are BWC queues. 
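The bwc.c/bwc.h hunks above turn the matcher's rule counter into a real atomic_t (atomic_inc/atomic_dec on add/remove, atomic_read at decision points) instead of a plain u32 that was only documented as "atomically accessed". A small C11 analogue of that pattern, with hypothetical names:

/* Hypothetical C11 analogue of the kernel atomic_t counter usage. */
#include <stdatomic.h>
#include <stdio.h>

struct matcher {
	atomic_int num_of_rules;
};

static void rule_add(struct matcher *m)    { atomic_fetch_add(&m->num_of_rules, 1); }
static void rule_remove(struct matcher *m) { atomic_fetch_sub(&m->num_of_rules, 1); }

static int rehash_needed(struct matcher *m, int threshold)
{
	/* Sample the counter once and act on the snapshot, as the driver does. */
	int n = atomic_load(&m->num_of_rules);

	return n >= threshold;
}

int main(void)
{
	struct matcher m = { .num_of_rules = 0 };

	rule_add(&m);
	rule_add(&m);
	rule_remove(&m);
	printf("rules=%d rehash=%d\n",
	       atomic_load(&m.num_of_rules), rehash_needed(&m, 2));
	return 0;
}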
*/ - return (ctx->queues - 1) / 2; + if (mlx5hws_context_bwc_supported(ctx)) + return (ctx->queues - 1) / 2; + return 0; } static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c index c00c138c3366..487e75476b0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c @@ -257,6 +257,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev, dest->ext_reformat_id); } break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + MLX5_SET(dest_format, in_dests, + destination_type, ifc_dest_type); + MLX5_SET(dest_format, in_dests, destination_id, + dest->destination_id); + break; default: ret = -EOPNOTSUPP; goto out; @@ -359,7 +365,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, ft_attr->type = fw_ft_type; ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; - default_miss_tbl = ctx->common_res[type].default_miss->ft_id; + default_miss_tbl = ctx->common_res.default_miss->ft_id; if (!default_miss_tbl) { pr_warn("HWS: no flow table ID for default miss\n"); return; @@ -622,12 +628,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, u32 pd, u32 *arg_id) { + u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0}; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; - u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; void *attr; int ret; - attr = MLX5_ADDR_OF(create_arg_in, in, hdr); + attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr); MLX5_SET(general_obj_in_cmd_hdr, attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, @@ -635,8 +641,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.log_obj_range, log_obj_range); - attr = MLX5_ADDR_OF(create_arg_in, in, arg); - MLX5_SET(arg, attr, access_pd, pd); + attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg); + MLX5_SET(modify_header_arg, attr, access_pd, pd); ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (ret) { @@ -812,7 +818,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, struct mlx5hws_cmd_packet_reformat_create_attr *attr, u32 *reformat_id) { - u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0}; size_t insz, cmd_data_sz, cmd_total_sz; void *prctx; void *pdata; @@ -845,7 +851,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, goto out; } - *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id); + *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); out: kfree(in); return ret; @@ -854,13 +860,13 @@ out: int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev, u32 reformat_id) { - u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0}; int ret; - MLX5_SET(dealloc_packet_reformat_in, in, opcode, + MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); - MLX5_SET(dealloc_packet_reformat_in, in, + MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, reformat_id); ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, 
sizeof(out)); @@ -889,73 +895,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn) return ret; } -int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, - struct mlx5hws_cmd_allow_other_vhca_access_attr *attr) -{ - u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0}; - void *key; - int ret; - - MLX5_SET(allow_other_vhca_access_in, - in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); - MLX5_SET(allow_other_vhca_access_in, - in, object_type_to_be_accessed, attr->obj_type); - MLX5_SET(allow_other_vhca_access_in, - in, object_id_to_be_accessed, attr->obj_id); - - key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); - memcpy(key, attr->access_key, sizeof(attr->access_key)); - - ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (ret) - mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n"); - - return ret; -} - -int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, - struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, - u32 *obj_id) -{ - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; - u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0}; - void *attr; - void *key; - int ret; - - attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); - MLX5_SET(general_obj_in_cmd_hdr, - attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, - attr, obj_type, alias_attr->obj_type); - MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1); - - attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); - MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); - MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); - - key = MLX5_ADDR_OF(alias_context, attr, access_key); - memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); - - ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (ret) { - mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n"); - goto out; - } - - *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); -out: - return ret; -} - -int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, - u16 obj_type, - u32 obj_id) -{ - return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id); -} - int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev, struct mlx5hws_cmd_generate_wqe_attr *attr, struct mlx5_cqe64 *ret_cqe) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h index 434f62b0904e..610c63d81ad9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h @@ -63,7 +63,7 @@ struct mlx5hws_cmd_forward_tbl { u8 type; u32 ft_id; u32 fg_id; - u32 refcount; + u32 refcount; /* protected by context ctrl lock */ }; struct mlx5hws_cmd_rtc_create_attr { @@ -334,14 +334,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev, void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev, struct mlx5hws_cmd_forward_tbl *tbl); -int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, - struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, - u32 *obj_id); - -int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, - u16 obj_type, - u32 obj_id); - int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn); int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev, @@ -352,9 +344,6 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, enum 
mlx5hws_table_type type, struct mlx5hws_cmd_ft_modify_attr *ft_attr); -int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, - struct mlx5hws_cmd_allow_other_vhca_access_attr *attr); - int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, u16 vport_number, u16 *gvmi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c index fd48b05e91e0..9cda2774fd64 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c @@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx) struct mlx5hws_pool_attr pool_attr = {0}; u8 max_log_sz; int ret; - int i; ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache); if (ret) @@ -39,23 +38,17 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx) max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran); - for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { - pool_attr.table_type = i; - ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr); - if (!ctx->stc_pool[i]) { - mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i); - ret = -ENOMEM; - goto free_stc_pools; - } + pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB; + ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!ctx->stc_pool) { + mlx5hws_err(ctx, "Failed to allocate STC pool\n"); + ret = -ENOMEM; + goto uninit_cache; } return 0; -free_stc_pools: - for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) - if (ctx->stc_pool[i]) - mlx5hws_pool_destroy(ctx->stc_pool[i]); - +uninit_cache: mlx5hws_definer_uninit_cache(ctx->definer_cache); uninit_pat_cache: mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); @@ -64,12 +57,8 @@ uninit_pat_cache: static void hws_context_pools_uninit(struct mlx5hws_context *ctx) { - int i; - - for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { - if (ctx->stc_pool[i]) - mlx5hws_pool_destroy(ctx->stc_pool[i]); - } + if (ctx->stc_pool) + mlx5hws_pool_destroy(ctx->stc_pool); mlx5hws_definer_uninit_cache(ctx->definer_cache); mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); @@ -161,8 +150,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx, if (ret) goto uninit_pd; - if (attr->bwc) - ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; + /* Context has support for backward compatible API, + * and does not have support for native HWS API. 
+ */ + ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h index 47f5cc8de73f..38c3647444ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h @@ -8,6 +8,7 @@ enum mlx5hws_context_flags { MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, + MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3, }; enum mlx5hws_context_shared_stc_type { @@ -37,8 +38,8 @@ struct mlx5hws_context { struct mlx5_core_dev *mdev; struct mlx5hws_cmd_query_caps *caps; u32 pd_num; - struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX]; - struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_pool *stc_pool; + struct mlx5hws_context_common_res common_res; struct mlx5hws_pattern_cache *pattern_cache; struct mlx5hws_definer_cache *definer_cache; struct mutex ctrl_lock; /* control lock to protect the whole context */ @@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx) return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; } +static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx) +{ + return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT; +} + bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx); u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c index 5b200b4bc1a8..696275fd0ce2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c @@ -148,8 +148,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma matcher->match_ste.rtc_1_id, (int)ste_1_id); - ste = &matcher->action_ste[0].ste; - ste_pool = matcher->action_ste[0].pool; + ste = &matcher->action_ste.ste; + ste_pool = matcher->action_ste.pool; if (ste_pool) { ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) @@ -171,10 +171,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma return ret; seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n", - matcher->action_ste[0].rtc_0_id, - (int)ste_0_id, - matcher->action_ste[0].rtc_1_id, - (int)ste_1_id, + matcher->action_ste.rtc_0_id, (int)ste_0_id, + matcher->action_ste.rtc_1_id, (int)ste_1_id, 0, mlx5hws_debug_icm_to_idx(icm_addr_0), mlx5hws_debug_icm_to_idx(icm_addr_1)); @@ -368,9 +366,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex static int hws_debug_dump_context_stc_resource(struct seq_file *f, struct mlx5hws_context *ctx, - u32 tbl_type, struct mlx5hws_pool_resource *resource) { + u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB; + seq_printf(f, "%d,0x%llx,%u,%u\n", MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC, HWS_PTR_TO_ID(ctx), @@ -382,31 +381,22 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f, static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx) { - struct mlx5hws_pool *stc_pool; - u32 table_type; + struct mlx5hws_pool *stc_pool = ctx->stc_pool; int ret; - int i; - for (i = 0; i < 
MLX5HWS_TABLE_TYPE_MAX; i++) { - stc_pool = ctx->stc_pool[i]; - table_type = MLX5HWS_TABLE_TYPE_BASE + i; - - if (!stc_pool) - continue; + if (!stc_pool) + return 0; - if (stc_pool->resource[0]) { - ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, - stc_pool->resource[0]); - if (ret) - return ret; - } + if (stc_pool->resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]); + if (ret) + return ret; + } - if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) { - ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, - stc_pool->mirror_resource[0]); - if (ret) - return ret; - } + if (stc_pool->mirror_resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]); + if (ret) + return ret; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c index 8fe96eb76baf..10ece7df1cfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c @@ -70,7 +70,7 @@ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \ - (bit_off) % BITS_IN_DW, second_dw_mask); \ + (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \ } else { \ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \ } \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h index 9432d5084def..5c1a2086efba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h @@ -785,7 +785,7 @@ struct mlx5hws_definer_cache { struct mlx5hws_definer_cache_item { struct mlx5hws_definer definer; - u32 refcount; + u32 refcount; /* protected by context ctrl lock */ struct list_head list_node; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c new file mode 100644 index 000000000000..f34bbbbba1c2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c @@ -0,0 +1,1377 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#include <linux/mlx5/vport.h> +#include <mlx5_core.h> +#include <fs_core.h> +#include <fs_cmd.h> +#include "fs_hws_pools.h" +#include "mlx5hws.h" + +#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16 +#define MLX5HWS_CTX_QUEUE_SIZE 256 + +static struct mlx5hws_action * +mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx); +static void +mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools, + unsigned long index); +static void +mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools, + unsigned long index); + +static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev, + struct mlx5_fs_hws_context *fs_ctx) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool; + struct mlx5hws_action_reformat_header reformat_hdr = {}; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + enum mlx5hws_action_type action_type; + int err = -ENOSPC; + + hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags); + if (!hws_pool->tag_action) + return err; + hws_pool->pop_vlan_action = 
mlx5hws_action_create_pop_vlan(ctx, flags); + if (!hws_pool->pop_vlan_action) + goto destroy_tag; + hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags); + if (!hws_pool->push_vlan_action) + goto destroy_pop_vlan; + hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags); + if (!hws_pool->drop_action) + goto destroy_push_vlan; + action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2; + hws_pool->decapl2_action = + mlx5hws_action_create_reformat(ctx, action_type, 1, + &reformat_hdr, 0, flags); + if (!hws_pool->decapl2_action) + goto destroy_drop; + hws_pool->remove_hdr_vlan_action = + mlx5_fs_create_action_remove_header_vlan(ctx); + if (!hws_pool->remove_hdr_vlan_action) + goto destroy_decapl2; + err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0, + MLX5HWS_ACTION_TYP_INSERT_HEADER); + if (err) + goto destroy_remove_hdr; + err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0, + MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2); + if (err) + goto cleanup_insert_hdr; + xa_init(&hws_pool->el2tol3tnl_pools); + xa_init(&hws_pool->el2tol2tnl_pools); + xa_init(&hws_pool->mh_pools); + xa_init(&hws_pool->table_dests); + xa_init(&hws_pool->vport_dests); + xa_init(&hws_pool->vport_vhca_dests); + return 0; + +cleanup_insert_hdr: + mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool); +destroy_remove_hdr: + mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action); +destroy_decapl2: + mlx5hws_action_destroy(hws_pool->decapl2_action); +destroy_drop: + mlx5hws_action_destroy(hws_pool->drop_action); +destroy_push_vlan: + mlx5hws_action_destroy(hws_pool->push_vlan_action); +destroy_pop_vlan: + mlx5hws_action_destroy(hws_pool->pop_vlan_action); +destroy_tag: + mlx5hws_action_destroy(hws_pool->tag_action); + return err; +} + +static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx) +{ + struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool; + struct mlx5hws_action *action; + struct mlx5_fs_pool *pool; + unsigned long i; + + xa_for_each(&hws_pool->vport_vhca_dests, i, action) + mlx5hws_action_destroy(action); + xa_destroy(&hws_pool->vport_vhca_dests); + xa_for_each(&hws_pool->vport_dests, i, action) + mlx5hws_action_destroy(action); + xa_destroy(&hws_pool->vport_dests); + xa_destroy(&hws_pool->table_dests); + xa_for_each(&hws_pool->mh_pools, i, pool) + mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i); + xa_destroy(&hws_pool->mh_pools); + xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool) + mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i); + xa_destroy(&hws_pool->el2tol2tnl_pools); + xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool) + mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i); + xa_destroy(&hws_pool->el2tol3tnl_pools); + mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool); + mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool); + mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action); + mlx5hws_action_destroy(hws_pool->decapl2_action); + mlx5hws_action_destroy(hws_pool->drop_action); + mlx5hws_action_destroy(hws_pool->push_vlan_action); + mlx5hws_action_destroy(hws_pool->pop_vlan_action); + mlx5hws_action_destroy(hws_pool->tag_action); +} + +static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns) +{ + struct mlx5hws_context_attr hws_ctx_attr = {}; + int err; + + hws_ctx_attr.queues = min_t(int, num_online_cpus(), + MLX5HWS_CTX_MAX_NUM_OF_QUEUES); + hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE; + + ns->fs_hws_context.hws_ctx = + mlx5hws_context_open(ns->dev, 
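mlx5_fs_init_hws_actions_pool() above builds up a chain of shared actions and pools and, on any failure, unwinds everything created so far in reverse order through a goto ladder. A stripped-down sketch of that unwind shape, with invented resource names:

/* Hypothetical sketch of the reverse-order goto unwind used above. */
#include <stdlib.h>

struct pool { void *a, *b, *c; };

static void *res_create(void)     { return malloc(1); }
static void res_destroy(void *r)  { free(r); }

static int pool_init(struct pool *p)
{
	p->a = res_create();
	if (!p->a)
		return -1;
	p->b = res_create();
	if (!p->b)
		goto destroy_a;
	p->c = res_create();
	if (!p->c)
		goto destroy_b;
	return 0;

destroy_b:
	res_destroy(p->b);
destroy_a:
	res_destroy(p->a);
	return -1;
}

static void pool_cleanup(struct pool *p)
{
	/* Tear down in the opposite order of creation. */
	res_destroy(p->c);
	res_destroy(p->b);
	res_destroy(p->a);
}

int main(void)
{
	struct pool p;

	if (pool_init(&p))
		return 1;
	pool_cleanup(&p);
	return 0;
}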
&hws_ctx_attr); + if (!ns->fs_hws_context.hws_ctx) { + mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n"); + return -EINVAL; + } + err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context); + if (err) { + mlx5_core_err(ns->dev, "Failed to init hws actions pool\n"); + mlx5hws_context_close(ns->fs_hws_context.hws_ctx); + return err; + } + return 0; +} + +static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns) +{ + mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context); + return mlx5hws_context_close(ns->fs_hws_context.hws_ctx); +} + +static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns, + u16 peer_vhca_id) +{ + struct mlx5hws_context *peer_ctx = NULL; + + if (peer_ns) + peer_ctx = peer_ns->fs_hws_context.hws_ctx; + mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx, + peer_vhca_id); + return 0; +} + +static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + struct mlx5hws_table *next_tbl; + int err; + + if (!ns->fs_hws_context.hws_ctx) + return -EINVAL; + + /* if no change required, return */ + if (!next_ft && !ft->fs_hws_table.miss_ft_set) + return 0; + + next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL; + err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl); + if (err) { + mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err); + return err; + } + ft->fs_hws_table.miss_ft_set = !!next_tbl; + return 0; +} + +static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context; + struct mlx5hws_action *dest_ft_action; + struct xarray *dests_xa; + int err; + + dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx, + ft->id, flags); + if (!dest_ft_action) { + mlx5_core_err(ns->dev, "Failed creating dest table action\n"); + return -ENOMEM; + } + + dests_xa = &fs_ctx->hws_pool.table_dests; + err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL); + if (err) + mlx5hws_action_destroy(dest_ft_action); + return err; +} + +static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft) +{ + struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context; + struct mlx5hws_action *dest_ft_action; + struct xarray *dests_xa; + int err; + + dests_xa = &fs_ctx->hws_pool.table_dests; + dest_ft_action = xa_erase(dests_xa, ft->id); + if (!dest_ft_action) { + mlx5_core_err(ns->dev, "Failed to erase dest ft action\n"); + return -ENOENT; + } + + err = mlx5hws_action_destroy(dest_ft_action); + if (err) + mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n"); + return err; +} + +static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_table_attr *ft_attr, + struct mlx5_flow_table *next_ft) +{ + struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx; + struct mlx5hws_table_attr tbl_attr = {}; + struct mlx5hws_table *tbl; + int err; + + if (mlx5_fs_cmd_is_fw_term_table(ft)) { + err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr, + next_ft); + if (err) + return err; + err = mlx5_fs_add_flow_table_dest_action(ns, ft); + if (err) + mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft); + return err; + } + + if (ns->table_type != FS_FT_FDB) { + 
mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n", + ns->table_type); + return -EOPNOTSUPP; + } + + tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB; + tbl_attr.level = ft_attr->level; + tbl = mlx5hws_table_create(ctx, &tbl_attr); + if (!tbl) { + mlx5_core_err(ns->dev, "Failed creating hws flow_table\n"); + return -EINVAL; + } + + ft->fs_hws_table.hws_table = tbl; + ft->id = mlx5hws_table_get_id(tbl); + + if (next_ft) { + err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft); + if (err) + goto destroy_table; + } + + ft->max_fte = INT_MAX; + + err = mlx5_fs_add_flow_table_dest_action(ns, ft); + if (err) + goto clear_ft_miss; + return 0; + +clear_ft_miss: + mlx5_fs_set_ft_default_miss(ns, ft, NULL); +destroy_table: + mlx5hws_table_destroy(tbl); + ft->fs_hws_table.hws_table = NULL; + return err; +} + +static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft) +{ + int err; + + err = mlx5_fs_del_flow_table_dest_action(ns, ft); + if (err) + mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err); + + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft); + + err = mlx5_fs_set_ft_default_miss(ns, ft, NULL); + if (err) + mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err); + + err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table); + if (err) + mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err); + + return err; +} + +static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); + + return mlx5_fs_set_ft_default_miss(ns, ft, next_ft); +} + +static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect) +{ + return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn, + disconnect); +} + +static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, u32 *in, + struct mlx5_flow_group *fg) +{ + struct mlx5hws_match_parameters mask; + struct mlx5hws_bwc_matcher *matcher; + u8 match_criteria_enable; + u32 priority; + + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg); + + mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + mask.match_sz = sizeof(fg->mask.match_criteria); + + match_criteria_enable = MLX5_GET(create_flow_group_in, in, + match_criteria_enable); + priority = MLX5_GET(create_flow_group_in, in, start_flow_index); + matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table, + priority, match_criteria_enable, + &mask); + if (!matcher) { + mlx5_core_err(ns->dev, "Failed creating matcher\n"); + return -EINVAL; + } + + fg->fs_hws_matcher.matcher = matcher; + return 0; +} + +static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg) +{ + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg); + + return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher); +} + +static struct mlx5hws_action * +mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_flow_rule *dst) +{ + return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id); +} + +static struct mlx5hws_action * 
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_flow_rule *dst) +{ + u32 table_num = dst->dest_attr.ft_num; + + return xa_load(&fs_ctx->hws_pool.table_dests, table_num); +} + +static struct mlx5hws_action * +mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_flow_rule *dst) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + u32 table_num = dst->dest_attr.ft_num; + + return mlx5hws_action_create_dest_table_num(ctx, table_num, flags); +} + +static struct mlx5hws_action * +mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_flow_rule *dst, + bool is_dest_type_uplink) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_flow_destination *dest_attr = &dst->dest_attr; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + struct mlx5hws_action *dest; + struct xarray *dests_xa; + bool vhca_id_valid; + unsigned long idx; + u16 vport_num; + int err; + + vhca_id_valid = is_dest_type_uplink || + (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID); + vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num; + if (vhca_id_valid) { + dests_xa = &fs_ctx->hws_pool.vport_vhca_dests; + idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num; + } else { + dests_xa = &fs_ctx->hws_pool.vport_dests; + idx = vport_num; + } +dest_load: + dest = xa_load(dests_xa, idx); + if (dest) + return dest; + + dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid, + dest_attr->vport.vhca_id, flags); + + err = xa_insert(dests_xa, idx, dest, GFP_KERNEL); + if (err) { + mlx5hws_action_destroy(dest); + dest = NULL; + + if (err == -EBUSY) + /* xarray entry was already stored by another thread */ + goto dest_load; + } + + return dest; +} + +static struct mlx5hws_action * +mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx, + struct mlx5_flow_rule *dst) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_flow_destination *dest_attr = &dst->dest_attr; + + return mlx5hws_action_create_dest_match_range(ctx, + dest_attr->range.field, + dest_attr->range.hit_ft, + dest_attr->range.miss_ft, + dest_attr->range.min, + dest_attr->range.max, + flags); +} + +static struct mlx5hws_action * +mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx, + struct mlx5hws_action_dest_attr *dests, + u32 num_of_dests, bool ignore_flow_level, + u32 flow_source) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + + return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests, + ignore_flow_level, + flow_source, flags); +} + +static struct mlx5hws_action * +mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx) +{ + return fs_ctx->hws_pool.push_vlan_action; +} + +static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan) +{ + u16 n_ethtype = vlan->ethtype; + u8 prio = vlan->prio; + u16 vid = vlan->vid; + + return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid; +} + +static struct mlx5hws_action * +mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx) +{ + return fs_ctx->hws_pool.pop_vlan_action; +} + +static struct mlx5hws_action * +mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx) +{ + return fs_ctx->hws_pool.decapl2_action; +} + +static struct mlx5hws_action * +mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx) +{ + return fs_ctx->hws_pool.drop_action; +} + +static 
struct mlx5hws_action * +mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx) +{ + return fs_ctx->hws_pool.tag_action; +} + +static struct mlx5hws_action * +mlx5_fs_create_action_last(struct mlx5hws_context *ctx) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + + return mlx5hws_action_create_last(ctx, flags); +} + +static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action) +{ + switch (mlx5hws_action_get_type(fs_action->action)) { + case MLX5HWS_ACTION_TYP_CTR: + mlx5_fc_put_hws_action(fs_action->counter); + break; + default: + mlx5hws_action_destroy(fs_action->action); + } +} + +static void +mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions, + int *num_fs_actions) +{ + int i; + + /* Free in reverse order to handle action dependencies */ + for (i = *num_fs_actions - 1; i >= 0; i--) + mlx5_fs_destroy_fs_action(*fs_actions + i); + *num_fs_actions = 0; + kfree(*fs_actions); + *fs_actions = NULL; +} + +/* Splits FTE's actions into cached, rule and destination actions. + * The cached and destination actions are saved on the fte hws rule. + * The rule actions are returned as a parameter, together with their count. + * We want to support a rule with 32 destinations, which means we need to + * account for 32 destinations plus usually a counter plus one more action + * for a multi-destination flow table. + * 32 is SW limitation for array size, keep. HWS limitation is 16M STEs per matcher + */ +#define MLX5_FLOW_CONTEXT_ACTION_MAX 34 +static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte, + struct mlx5hws_rule_action **ractions) +{ + struct mlx5_flow_act *fte_action = &fte->act_dests.action; + struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context; + struct mlx5hws_action_dest_attr *dest_actions; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + struct mlx5_fs_hws_rule_action *fs_actions; + struct mlx5_core_dev *dev = ns->dev; + struct mlx5hws_action *dest_action; + struct mlx5hws_action *tmp_action; + struct mlx5_fs_hws_pr *pr_data; + struct mlx5_fs_hws_mh *mh_data; + bool delay_encap_set = false; + struct mlx5_flow_rule *dst; + int num_dest_actions = 0; + int num_fs_actions = 0; + int num_actions = 0; + int err; + + *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions), + GFP_KERNEL); + if (!*ractions) { + err = -ENOMEM; + goto out_err; + } + + fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, + sizeof(*fs_actions), GFP_KERNEL); + if (!fs_actions) { + err = -ENOMEM; + goto free_actions_alloc; + } + + dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, + sizeof(*dest_actions), GFP_KERNEL); + if (!dest_actions) { + err = -ENOMEM; + goto free_fs_actions_alloc; + } + + /* The order of the actions are must to be kept, only the following + * order is supported by HW steering: + * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan + * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso + * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last + */ + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_dest_actions_alloc; + } + (*ractions)[num_actions++].action = tmp_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + int reformat_type = fte_action->pkt_reformat->reformat_type; + + if (fte_action->pkt_reformat->owner == 
MLX5_FLOW_RESOURCE_OWNER_FW) { + mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n"); + err = -EINVAL; + goto free_actions; + } + + if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) { + pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data; + (*ractions)[num_actions].reformat.offset = pr_data->offset; + (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx; + (*ractions)[num_actions].reformat.data = pr_data->data; + (*ractions)[num_actions++].action = + fte_action->pkt_reformat->fs_hws_action.hws_action; + } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) { + (*ractions)[num_actions++].action = + fte_action->pkt_reformat->fs_hws_action.hws_action; + } else { + delay_encap_set = true; + } + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + (*ractions)[num_actions++].action = tmp_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { + tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + (*ractions)[num_actions++].action = tmp_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + mh_data = fte_action->modify_hdr->fs_hws_action.mh_data; + (*ractions)[num_actions].modify_header.offset = mh_data->offset; + (*ractions)[num_actions].modify_header.data = mh_data->data; + (*ractions)[num_actions++].action = + fte_action->modify_hdr->fs_hws_action.hws_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + (*ractions)[num_actions].push_vlan.vlan_hdr = + htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0])); + (*ractions)[num_actions++].action = tmp_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + (*ractions)[num_actions].push_vlan.vlan_hdr = + htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1])); + (*ractions)[num_actions++].action = tmp_action; + } + + if (delay_encap_set) { + pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data; + (*ractions)[num_actions].reformat.offset = pr_data->offset; + (*ractions)[num_actions].reformat.data = pr_data->data; + (*ractions)[num_actions++].action = + fte_action->pkt_reformat->fs_hws_action.hws_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + list_for_each_entry(dst, &fte->node.children, node.list) { + struct mlx5_fc *counter; + + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + + counter = dst->dest_attr.counter; + tmp_action = mlx5_fc_get_hws_action(ctx, counter); + if (!tmp_action) { + err = -EINVAL; + goto free_actions; + } + + (*ractions)[num_actions].counter.offset = + mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter); + (*ractions)[num_actions++].action = tmp_action; + fs_actions[num_fs_actions].action = tmp_action; + fs_actions[num_fs_actions++].counter = counter; + } + } + + if (fte->act_dests.flow_context.flow_tag) { + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + tmp_action = mlx5_fs_get_action_tag(fs_ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + 
(*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag; + (*ractions)[num_actions++].action = tmp_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + err = -EOPNOTSUPP; + goto free_actions; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) { + dest_action = mlx5_fs_get_dest_action_drop(fs_ctx); + if (!dest_action) { + err = -ENOMEM; + goto free_actions; + } + dest_actions[num_dest_actions++].dest = dest_action; + } + + if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + list_for_each_entry(dst, &fte->node.children, node.list) { + struct mlx5_flow_destination *attr = &dst->dest_attr; + bool type_uplink = + attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK; + + if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + switch (attr->type) { + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst); + break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: + dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx, + dst); + if (dest_action) + break; + dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx, + dst); + fs_actions[num_fs_actions++].action = dest_action; + break; + case MLX5_FLOW_DESTINATION_TYPE_RANGE: + dest_action = mlx5_fs_create_dest_action_range(ctx, dst); + fs_actions[num_fs_actions++].action = dest_action; + break; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst, + type_uplink); + break; + default: + err = -EOPNOTSUPP; + goto free_actions; + } + if (!dest_action) { + err = -ENOMEM; + goto free_actions; + } + dest_actions[num_dest_actions++].dest = dest_action; + } + } + + if (num_dest_actions == 1) { + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + (*ractions)[num_actions++].action = dest_actions->dest; + } else if (num_dest_actions > 1) { + u32 flow_source = fte->act_dests.flow_context.flow_source; + bool ignore_flow_level; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + ignore_flow_level = + !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions, + num_dest_actions, + ignore_flow_level, + flow_source); + if (!tmp_action) { + err = -EOPNOTSUPP; + goto free_actions; + } + fs_actions[num_fs_actions++].action = tmp_action; + (*ractions)[num_actions++].action = tmp_action; + } + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + + tmp_action = mlx5_fs_create_action_last(ctx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_actions[num_fs_actions++].action = tmp_action; + (*ractions)[num_actions++].action = tmp_action; + + kfree(dest_actions); + + /* Actions created specifically for this rule will be destroyed + * once rule is deleted. 
+ */ + fte->fs_hws_rule.num_fs_actions = num_fs_actions; + fte->fs_hws_rule.hws_fs_actions = fs_actions; + + return 0; + +free_actions: + mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions); +free_dest_actions_alloc: + kfree(dest_actions); +free_fs_actions_alloc: + kfree(fs_actions); +free_actions_alloc: + kfree(*ractions); + *ractions = NULL; +out_err: + return err; +} + +static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) +{ + struct mlx5hws_match_parameters params; + struct mlx5hws_rule_action *ractions; + struct mlx5hws_bwc_rule *rule; + int err = 0; + + if (mlx5_fs_cmd_is_fw_term_table(ft)) { + /* Packet reformat on termination table not supported yet */ + if (fte->act_dests.action.action & + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) + return -EOPNOTSUPP; + return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); + } + + err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions); + if (err) + goto out_err; + + params.match_sz = sizeof(fte->val); + params.match_buf = fte->val; + + rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params, + fte->act_dests.flow_context.flow_source, + ractions); + kfree(ractions); + if (!rule) { + err = -EINVAL; + goto free_actions; + } + + fte->fs_hws_rule.bwc_rule = rule; + return 0; + +free_actions: + mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions, + &fte->fs_hws_rule.num_fs_actions); +out_err: + mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err); + return err; +} + +static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule; + int err; + + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte); + + err = mlx5hws_bwc_rule_destroy(rule->bwc_rule); + rule->bwc_rule = NULL; + + mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions); + + return err; +} + +static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + int modify_mask, + struct fs_fte *fte) +{ + int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) | + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); + struct mlx5_fs_hws_rule_action *saved_hws_fs_actions; + struct mlx5hws_rule_action *ractions; + int saved_num_fs_actions; + int ret; + + if (mlx5_fs_cmd_is_fw_term_table(ft)) + return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, + modify_mask, fte); + + if ((modify_mask & ~allowed_mask) != 0) + return -EINVAL; + + saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions; + saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions; + + ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions); + if (ret) + return ret; + + ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions); + kfree(ractions); + if (ret) + goto restore_actions; + + mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions); + return ret; + +restore_actions: + mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions, + &fte->fs_hws_rule.num_fs_actions); + fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions; + fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions; + return ret; +} + +static struct mlx5hws_action * +mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx) +{ + u32 flags 
= MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {}; + + /* MAC anchor not supported in HWS reformat, use VLAN anchor */ + remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START; + remove_hdr_vlan.offset = 0; + remove_hdr_vlan.size = sizeof(struct vlan_hdr); + return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags); +} + +static struct mlx5hws_action * +mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_pkt_reformat_params *params) +{ + if (!params || + params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START || + params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) || + params->size != sizeof(struct vlan_hdr)) + return NULL; + + return fs_ctx->hws_pool.remove_hdr_vlan_action; +} + +static int +mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev, + struct mlx5_pkt_reformat_params *params) +{ + if ((!params->data && params->size) || (params->data && !params->size) || + MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size || + MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) { + mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n"); + return -EINVAL; + } + if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR || + params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET || + params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) { + mlx5_core_err(mdev, "Only vlan insert header supported\n"); + return -EOPNOTSUPP; + } + return 0; +} + +static int +mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev, + struct mlx5_pkt_reformat_params *params) +{ + if (params->param_0 || params->param_1) { + mlx5_core_err(dev, "Invalid reformat params\n"); + return -EINVAL; + } + return 0; +} + +static struct mlx5_fs_pool * +mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools, + enum mlx5hws_action_type reformat_type, size_t size) +{ + struct mlx5_fs_pool *pr_pool; + unsigned long index = size; + int err; + + pr_pool = xa_load(pr_pools, index); + if (pr_pool) + return pr_pool; + + pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL); + if (!pr_pool) + return ERR_PTR(-ENOMEM); + err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type); + if (err) + goto free_pr_pool; + err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL); + if (err) + goto cleanup_pr_pool; + return pr_pool; + +cleanup_pr_pool: + mlx5_fs_hws_pr_pool_cleanup(pr_pool); +free_pr_pool: + kfree(pr_pool); + return ERR_PTR(err); +} + +static void +mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools, + unsigned long index) +{ + xa_erase(pr_pools, index); + mlx5_fs_hws_pr_pool_cleanup(pool); + kfree(pool); +} + +static int +mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat_params *params, + enum mlx5_flow_namespace_type namespace, + struct mlx5_pkt_reformat *pkt_reformat) +{ + struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context; + struct mlx5_fs_hws_actions_pool *hws_pool; + struct mlx5hws_action *hws_action = NULL; + struct mlx5_fs_hws_pr *pr_data = NULL; + struct mlx5_fs_pool *pr_pool = NULL; + struct mlx5_core_dev *dev = ns->dev; + u8 hdr_idx = 0; + int err; + + if (!params) + return -EINVAL; + + hws_pool = &fs_ctx->hws_pool; + + switch (params->type) { + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: + case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: + if (mlx5_fs_verify_encap_decap_params(dev, params)) + return -EINVAL; + pr_pool = 
mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2, + params->size); + if (IS_ERR(pr_pool)) + return PTR_ERR(pr_pool); + break; + case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: + if (mlx5_fs_verify_encap_decap_params(dev, params)) + return -EINVAL; + pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3, + params->size); + if (IS_ERR(pr_pool)) + return PTR_ERR(pr_pool); + break; + case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2: + if (mlx5_fs_verify_encap_decap_params(dev, params)) + return -EINVAL; + pr_pool = &hws_pool->dl3tnltol2_pool; + hdr_idx = params->size == ETH_HLEN ? + MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX : + MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX; + break; + case MLX5_REFORMAT_TYPE_INSERT_HDR: + err = mlx5_fs_verify_insert_header_params(dev, params); + if (err) + return err; + pr_pool = &hws_pool->insert_hdr_pool; + break; + case MLX5_REFORMAT_TYPE_REMOVE_HDR: + hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params); + if (!hws_action) + mlx5_core_err(dev, "Only vlan remove header supported\n"); + break; + default: + mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", + params->type); + return -EOPNOTSUPP; + } + + if (pr_pool) { + pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool); + if (IS_ERR_OR_NULL(pr_data)) + return !pr_data ? -EINVAL : PTR_ERR(pr_data); + hws_action = pr_data->bulk->hws_action; + if (!hws_action) { + mlx5_core_err(dev, + "Failed allocating packet-reformat action\n"); + err = -EINVAL; + goto release_pr; + } + pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL); + if (!pr_data->data) { + err = -ENOMEM; + goto release_pr; + } + pr_data->hdr_idx = hdr_idx; + pr_data->data_size = params->size; + pkt_reformat->fs_hws_action.pr_data = pr_data; + } + + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; + pkt_reformat->fs_hws_action.hws_action = hws_action; + return 0; + +release_pr: + if (pr_pool && pr_data) + mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data); + return err; +} + +static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat *pkt_reformat) +{ + struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool; + struct mlx5_core_dev *dev = ns->dev; + struct mlx5_fs_hws_pr *pr_data; + struct mlx5_fs_pool *pr_pool; + + if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) + return; + + if (!pkt_reformat->fs_hws_action.pr_data) { + mlx5_core_err(ns->dev, "Failed release packet-reformat\n"); + return; + } + pr_data = pkt_reformat->fs_hws_action.pr_data; + + switch (pkt_reformat->reformat_type) { + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: + case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: + pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2, + pr_data->data_size); + break; + case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: + pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2, + pr_data->data_size); + break; + case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2: + pr_pool = &hws_pool->dl3tnltol2_pool; + break; + case MLX5_REFORMAT_TYPE_INSERT_HDR: + pr_pool = &hws_pool->insert_hdr_pool; + break; + default: + mlx5_core_err(ns->dev, "Unknown packet-reformat type\n"); + return; + } + if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) { + mlx5_core_err(ns->dev, "Failed release packet-reformat\n"); + return; + } + 
kfree(pr_data->data); + mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data); + pkt_reformat->fs_hws_action.pr_data = NULL; +} + +static struct mlx5_fs_pool * +mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev, + struct mlx5hws_action_mh_pattern *pattern, + struct xarray *mh_pools, unsigned long index) +{ + struct mlx5_fs_pool *pool; + int err; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern); + if (err) + goto free_pool; + err = xa_insert(mh_pools, index, pool, GFP_KERNEL); + if (err) + goto cleanup_pool; + return pool; + +cleanup_pool: + mlx5_fs_hws_mh_pool_cleanup(pool); +free_pool: + kfree(pool); + return ERR_PTR(err); +} + +static void +mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools, + unsigned long index) +{ + xa_erase(mh_pools, index); + mlx5_fs_hws_mh_pool_cleanup(pool); + kfree(pool); +} + +static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns, + u8 namespace, u8 num_actions, + void *modify_actions, + struct mlx5_modify_hdr *modify_hdr) +{ + struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool; + struct mlx5hws_action_mh_pattern pattern = {}; + struct mlx5_fs_hws_mh *mh_data = NULL; + struct mlx5hws_action *hws_action; + struct mlx5_fs_pool *pool; + unsigned long i, cnt = 0; + bool known_pattern; + int err; + + pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions; + pattern.data = modify_actions; + + known_pattern = false; + xa_for_each(&hws_pool->mh_pools, i, pool) { + if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) { + known_pattern = true; + break; + } + cnt++; + } + + if (!known_pattern) { + pool = mlx5_fs_create_mh_pool(ns->dev, &pattern, + &hws_pool->mh_pools, cnt); + if (IS_ERR(pool)) + return PTR_ERR(pool); + } + mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool); + if (IS_ERR(mh_data)) { + err = PTR_ERR(mh_data); + goto destroy_pool; + } + hws_action = mh_data->bulk->hws_action; + mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL); + if (!mh_data->data) { + err = -ENOMEM; + goto release_mh; + } + modify_hdr->fs_hws_action.mh_data = mh_data; + modify_hdr->fs_hws_action.fs_pool = pool; + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; + modify_hdr->fs_hws_action.hws_action = hws_action; + + return 0; + +release_mh: + mlx5_fs_hws_mh_pool_release_mh(pool, mh_data); +destroy_pool: + if (!known_pattern) + mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt); + return err; +} + +static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_modify_hdr *modify_hdr) +{ + struct mlx5_fs_hws_mh *mh_data; + struct mlx5_fs_pool *pool; + + if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) { + mlx5_core_err(ns->dev, "Failed release modify-header\n"); + return; + } + + mh_data = modify_hdr->fs_hws_action.mh_data; + kfree(mh_data->data); + pool = modify_hdr->fs_hws_action.fs_pool; + mlx5_fs_hws_mh_pool_release_mh(pool, mh_data); + modify_hdr->fs_hws_action.mh_data = NULL; +} + +static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns, + u16 format_id, u32 *match_mask) +{ + return -EOPNOTSUPP; +} + +static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns, + int definer_id) +{ + return -EOPNOTSUPP; +} + +static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns, + enum fs_flow_table_type ft_type) +{ + if (ft_type != FS_FT_FDB) + return 0; + + return 
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | + MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX | + MLX5_FLOW_STEERING_CAP_MATCH_RANGES; +} + +bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev) +{ + return mlx5hws_is_supported(dev); +} + +static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = { + .create_flow_table = mlx5_cmd_hws_create_flow_table, + .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table, + .modify_flow_table = mlx5_cmd_hws_modify_flow_table, + .update_root_ft = mlx5_cmd_hws_update_root_ft, + .create_flow_group = mlx5_cmd_hws_create_flow_group, + .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group, + .create_fte = mlx5_cmd_hws_create_fte, + .delete_fte = mlx5_cmd_hws_delete_fte, + .update_fte = mlx5_cmd_hws_update_fte, + .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc, + .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc, + .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc, + .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc, + .create_match_definer = mlx5_cmd_hws_create_match_definer, + .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer, + .create_ns = mlx5_cmd_hws_create_ns, + .destroy_ns = mlx5_cmd_hws_destroy_ns, + .set_peer = mlx5_cmd_hws_set_peer, + .get_capabilities = mlx5_cmd_hws_get_capabilities, +}; + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void) +{ + return &mlx5_flow_cmds_hws; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h new file mode 100644 index 000000000000..cbddb72d4362 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#ifndef _MLX5_FS_HWS_ +#define _MLX5_FS_HWS_ + +#include "mlx5hws.h" +#include "fs_hws_pools.h" + +struct mlx5_fs_hws_actions_pool { + struct mlx5hws_action *tag_action; + struct mlx5hws_action *pop_vlan_action; + struct mlx5hws_action *push_vlan_action; + struct mlx5hws_action *drop_action; + struct mlx5hws_action *decapl2_action; + struct mlx5hws_action *remove_hdr_vlan_action; + struct mlx5_fs_pool insert_hdr_pool; + struct mlx5_fs_pool dl3tnltol2_pool; + struct xarray el2tol3tnl_pools; + struct xarray el2tol2tnl_pools; + struct xarray mh_pools; + struct xarray table_dests; + struct xarray vport_vhca_dests; + struct xarray vport_dests; +}; + +struct mlx5_fs_hws_context { + struct mlx5hws_context *hws_ctx; + struct mlx5_fs_hws_actions_pool hws_pool; +}; + +struct mlx5_fs_hws_table { + struct mlx5hws_table *hws_table; + bool miss_ft_set; +}; + +struct mlx5_fs_hws_action { + struct mlx5hws_action *hws_action; + struct mlx5_fs_pool *fs_pool; + struct mlx5_fs_hws_pr *pr_data; + struct mlx5_fs_hws_mh *mh_data; +}; + +struct mlx5_fs_hws_matcher { + struct mlx5hws_bwc_matcher *matcher; +}; + +struct mlx5_fs_hws_rule_action { + struct mlx5hws_action *action; + union { + struct mlx5_fc *counter; + }; +}; + +struct mlx5_fs_hws_rule { + struct mlx5hws_bwc_rule *bwc_rule; + struct mlx5_fs_hws_rule_action *hws_fs_actions; + int num_fs_actions; +}; + +#ifdef CONFIG_MLX5_HW_STEERING + +bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev); + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void); + +#else + +static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev) +{ + return false; +} + +static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void) +{ + return NULL; +} + +#endif /* 
CONFIG_MLX5_HWS_STEERING */ +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c new file mode 100644 index 000000000000..2ae4ac62b0e2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#include <mlx5_core.h> +#include "fs_hws_pools.h" + +#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536 +#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18) +#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10 + +static struct mlx5hws_action * +mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx) +{ + struct mlx5hws_action_reformat_header reformat_hdr[2] = {}; + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB; + enum mlx5hws_action_type reformat_type; + u32 log_bulk_size; + + reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2; + reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN; + reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN; + + log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN); + return mlx5hws_action_create_reformat(ctx, reformat_type, 2, + reformat_hdr, log_bulk_size, flags); +} + +static struct mlx5hws_action * +mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size) +{ + struct mlx5hws_action_reformat_header reformat_hdr = {}; + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB; + enum mlx5hws_action_type reformat_type; + u32 log_bulk_size; + + reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3; + reformat_hdr.sz = data_size; + + log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN); + return mlx5hws_action_create_reformat(ctx, reformat_type, 1, + &reformat_hdr, log_bulk_size, flags); +} + +static struct mlx5hws_action * +mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size) +{ + struct mlx5hws_action_reformat_header reformat_hdr = {}; + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB; + enum mlx5hws_action_type reformat_type; + u32 log_bulk_size; + + reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2; + reformat_hdr.sz = data_size; + + log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN); + return mlx5hws_action_create_reformat(ctx, reformat_type, 1, + &reformat_hdr, log_bulk_size, flags); +} + +static struct mlx5hws_action * +mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx) +{ + struct mlx5hws_action_insert_header insert_hdr = {}; + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB; + u32 log_bulk_size; + + log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN); + insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE; + insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR; + insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET; + + return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr, + log_bulk_size, flags); +} + +static struct mlx5hws_action * +mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev, + struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx) +{ + struct mlx5_flow_root_namespace *root_ns; + struct mlx5hws_context *ctx; + size_t encap_data_size; + + root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS) + return NULL; + + ctx = root_ns->fs_hws_context.hws_ctx; + if (!ctx) + return NULL; + + encap_data_size = pr_pool_ctx->encap_data_size; + switch (pr_pool_ctx->reformat_type) { + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + return 
mlx5_fs_dl3tnltol2_bulk_action_create(ctx); + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size); + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size); + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + return mlx5_fs_insert_hdr_bulk_action_create(ctx); + default: + return NULL; + } + return NULL; +} + +static struct mlx5_fs_bulk * +mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx) +{ + struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx; + struct mlx5_fs_hws_pr_bulk *pr_bulk; + int bulk_len; + int i; + + if (!pool_ctx) + return NULL; + pr_pool_ctx = pool_ctx; + bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN; + pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL); + if (!pr_bulk) + return NULL; + + if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len)) + goto free_pr_bulk; + + for (i = 0; i < bulk_len; i++) { + pr_bulk->prs_data[i].bulk = pr_bulk; + pr_bulk->prs_data[i].offset = i; + } + + pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx); + if (!pr_bulk->hws_action) + goto cleanup_fs_bulk; + + return &pr_bulk->fs_bulk; + +cleanup_fs_bulk: + mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk); +free_pr_bulk: + kvfree(pr_bulk); + return NULL; +} + +static int +mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk) +{ + struct mlx5_fs_hws_pr_bulk *pr_bulk; + + pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk); + if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) { + mlx5_core_err(dev, "Freeing bulk before all reformats were released\n"); + return -EBUSY; + } + + mlx5hws_action_destroy(pr_bulk->hws_action); + mlx5_fs_bulk_cleanup(fs_bulk); + kvfree(pr_bulk); + + return 0; +} + +static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool) +{ + hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD, + hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO); +} + +static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = { + .bulk_create = mlx5_fs_hws_pr_bulk_create, + .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy, + .update_threshold = mlx5_hws_pool_update_threshold, +}; + +int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool, + struct mlx5_core_dev *dev, size_t encap_data_size, + enum mlx5hws_action_type reformat_type) +{ + struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx; + + if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER && + reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 && + reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 && + reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) + return -EOPNOTSUPP; + + pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL); + if (!pr_pool_ctx) + return -ENOMEM; + pr_pool_ctx->reformat_type = reformat_type; + pr_pool_ctx->encap_data_size = encap_data_size; + mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx); + return 0; +} + +void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool) +{ + struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx; + + mlx5_fs_pool_cleanup(pr_pool); + pr_pool_ctx = pr_pool->pool_ctx; + if (!pr_pool_ctx) + return; + kfree(pr_pool_ctx); +} + +struct mlx5_fs_hws_pr * +mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool) +{ + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_fs_hws_pr_bulk *pr_bulk; + int err; + + err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index); + if (err) + return ERR_PTR(err); + pr_bulk = 
container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk, + fs_bulk); + return &pr_bulk->prs_data[pool_index.index]; +} + +void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool, + struct mlx5_fs_hws_pr *pr_data) +{ + struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk; + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_core_dev *dev = pr_pool->dev; + + pool_index.fs_bulk = fs_bulk; + pool_index.index = pr_data->offset; + if (mlx5_fs_pool_release_index(pr_pool, &pool_index)) + mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n"); +} + +struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data) +{ + return pr_data->bulk->hws_action; +} + +static struct mlx5hws_action * +mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx, + struct mlx5hws_action_mh_pattern *pattern) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB; + u32 log_bulk_size; + + log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN); + return mlx5hws_action_create_modify_header(ctx, 1, pattern, + log_bulk_size, flags); +} + +static struct mlx5_fs_bulk * +mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx) +{ + struct mlx5hws_action_mh_pattern *pattern; + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_fs_hws_mh_bulk *mh_bulk; + struct mlx5hws_context *ctx; + int bulk_len; + + if (!pool_ctx) + return NULL; + + root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS) + return NULL; + + ctx = root_ns->fs_hws_context.hws_ctx; + if (!ctx) + return NULL; + + pattern = pool_ctx; + bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN; + mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL); + if (!mh_bulk) + return NULL; + + if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len)) + goto free_mh_bulk; + + for (int i = 0; i < bulk_len; i++) { + mh_bulk->mhs_data[i].bulk = mh_bulk; + mh_bulk->mhs_data[i].offset = i; + } + + mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern); + if (!mh_bulk->hws_action) + goto cleanup_fs_bulk; + + return &mh_bulk->fs_bulk; + +cleanup_fs_bulk: + mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk); +free_mh_bulk: + kvfree(mh_bulk); + return NULL; +} + +static int +mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev, + struct mlx5_fs_bulk *fs_bulk) +{ + struct mlx5_fs_hws_mh_bulk *mh_bulk; + + mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk); + if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) { + mlx5_core_err(dev, "Freeing bulk before all modify header were released\n"); + return -EBUSY; + } + + mlx5hws_action_destroy(mh_bulk->hws_action); + mlx5_fs_bulk_cleanup(fs_bulk); + kvfree(mh_bulk); + + return 0; +} + +static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = { + .bulk_create = mlx5_fs_hws_mh_bulk_create, + .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy, + .update_threshold = mlx5_hws_pool_update_threshold, +}; + +int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool, + struct mlx5_core_dev *dev, + struct mlx5hws_action_mh_pattern *pattern) +{ + struct mlx5hws_action_mh_pattern *pool_pattern; + + pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL); + if (!pool_pattern) + return -ENOMEM; + pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL); + if (!pool_pattern->data) { + kfree(pool_pattern); + return -ENOMEM; + } + pool_pattern->sz = pattern->sz; + mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops, + pool_pattern); + return 0; +} + 
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool) +{ + struct mlx5hws_action_mh_pattern *pool_pattern; + + mlx5_fs_pool_cleanup(fs_hws_mh_pool); + pool_pattern = fs_hws_mh_pool->pool_ctx; + if (!pool_pattern) + return; + kfree(pool_pattern->data); + kfree(pool_pattern); +} + +struct mlx5_fs_hws_mh * +mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool) +{ + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_fs_hws_mh_bulk *mh_bulk; + int err; + + err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index); + if (err) + return ERR_PTR(err); + mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk, + fs_bulk); + return &mh_bulk->mhs_data[pool_index.index]; +} + +void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool, + struct mlx5_fs_hws_mh *mh_data) +{ + struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk; + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_core_dev *dev = mh_pool->dev; + + pool_index.fs_bulk = fs_bulk; + pool_index.index = mh_data->offset; + if (mlx5_fs_pool_release_index(mh_pool, &pool_index)) + mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n"); +} + +bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool, + struct mlx5hws_action_mh_pattern *pattern) +{ + struct mlx5hws_action_mh_pattern *pool_pattern; + int num_actions, i; + + pool_pattern = mh_pool->pool_ctx; + if (WARN_ON_ONCE(!pool_pattern)) + return false; + + if (pattern->sz != pool_pattern->sz) + return false; + num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); + for (i = 0; i < num_actions; i++) { + if ((__force __be32)pattern->data[i] != + (__force __be32)pool_pattern->data[i]) + return false; + } + return true; +} + +struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx, + struct mlx5_fc *counter) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_fc_bulk *fc_bulk = counter->bulk; + struct mlx5_fc_bulk_hws_data *fc_bulk_hws; + + fc_bulk_hws = &fc_bulk->hws_data; + /* try avoid locking if not necessary */ + if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) + return fc_bulk_hws->hws_action; + + mutex_lock(&fc_bulk_hws->lock); + if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) { + mutex_unlock(&fc_bulk_hws->lock); + return fc_bulk_hws->hws_action; + } + fc_bulk_hws->hws_action = + mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags); + if (!fc_bulk_hws->hws_action) { + mutex_unlock(&fc_bulk_hws->lock); + return NULL; + } + refcount_set(&fc_bulk_hws->hws_action_refcount, 1); + mutex_unlock(&fc_bulk_hws->lock); + + return fc_bulk_hws->hws_action; +} + +void mlx5_fc_put_hws_action(struct mlx5_fc *counter) +{ + struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data; + + /* try avoid locking if not necessary */ + if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount)) + return; + + mutex_lock(&fc_bulk_hws->lock); + if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) { + mutex_unlock(&fc_bulk_hws->lock); + return; + } + mlx5hws_action_destroy(fc_bulk_hws->hws_action); + fc_bulk_hws->hws_action = NULL; + mutex_unlock(&fc_bulk_hws->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h new file mode 100644 index 000000000000..34072551dd21 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: 
GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#ifndef __MLX5_FS_HWS_POOLS_H__ +#define __MLX5_FS_HWS_POOLS_H__ + +#include <linux/if_vlan.h> +#include "fs_pool.h" +#include "fs_core.h" + +#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START +#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto) +#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr) + +enum { + MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0, + MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX, +}; + +struct mlx5_fs_hws_pr { + struct mlx5_fs_hws_pr_bulk *bulk; + u32 offset; + u8 hdr_idx; + u8 *data; + size_t data_size; +}; + +struct mlx5_fs_hws_pr_bulk { + struct mlx5_fs_bulk fs_bulk; + struct mlx5hws_action *hws_action; + struct mlx5_fs_hws_pr prs_data[]; +}; + +struct mlx5_fs_hws_pr_pool_ctx { + enum mlx5hws_action_type reformat_type; + size_t encap_data_size; +}; + +struct mlx5_fs_hws_mh { + struct mlx5_fs_hws_mh_bulk *bulk; + u32 offset; + u8 *data; +}; + +struct mlx5_fs_hws_mh_bulk { + struct mlx5_fs_bulk fs_bulk; + struct mlx5_fs_pool *mh_pool; + struct mlx5hws_action *hws_action; + struct mlx5_fs_hws_mh mhs_data[]; +}; + +int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool, + struct mlx5_core_dev *dev, size_t encap_data_size, + enum mlx5hws_action_type reformat_type); +void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool); + +struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool); +void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool, + struct mlx5_fs_hws_pr *pr_data); +struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data); +int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool, + struct mlx5_core_dev *dev, + struct mlx5hws_action_mh_pattern *pattern); +void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool); +struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool); +void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool, + struct mlx5_fs_hws_mh *mh_data); +bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool, + struct mlx5hws_action_mh_pattern *pattern); +struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx, + struct mlx5_fc *counter); +void mlx5_fc_put_hws_action(struct mlx5_fc *counter); +#endif /* __MLX5_FS_HWS_POOLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h index 3c8635f286ce..30ccd635b505 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h @@ -39,7 +39,6 @@ #define mlx5hws_dbg(ctx, arg...) 
mlx5_core_dbg((ctx)->mdev, ##arg) #define MLX5HWS_TABLE_TYPE_BASE 2 -#define MLX5HWS_ACTION_STE_IDX_ANY 0 static inline bool is_mem_zero(const u8 *mem, size_t size) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c index 1bb3a6f8c3cd..b61864b32053 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c @@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) next->match_ste.rtc_0_id, next->match_ste.rtc_1_id); if (ret) { - mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n"); - goto matcher_reconnect; + mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n"); + return ret; } } else { ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); if (ret) { - mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n"); - goto matcher_reconnect; + mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n"); + return ret; } } @@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) if (prev_ft_id == tbl->ft_id) { ret = mlx5hws_table_update_connected_miss_tables(tbl); if (ret) { - mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n"); - goto matcher_reconnect; + mlx5hws_err(tbl->ctx, + "Fatal error, failed to update connected miss table\n"); + return ret; } } ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id); if (ret) { mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n"); - goto matcher_reconnect; + return ret; } return 0; - -matcher_reconnect: - if (list_empty(&tbl->matchers_list) || !prev) - list_add(&matcher->list_node, &tbl->matchers_list); - else - /* insert after prev matcher */ - list_add(&matcher->list_node, &prev->list_node); - - return ret; } static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, @@ -208,7 +200,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, enum mlx5hws_matcher_rtc_type rtc_type, bool is_mirror) { - struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste; + struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste; enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src; bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH; @@ -225,8 +217,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, } static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, - enum mlx5hws_matcher_rtc_type rtc_type, - u8 action_ste_selector) + enum mlx5hws_matcher_rtc_type rtc_type) { struct mlx5hws_matcher_attr *attr = &matcher->attr; struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; @@ -286,14 +277,20 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, break; case HWS_MATCHER_RTC_TYPE_STE_ARRAY: - action_ste = &matcher->action_ste[action_ste_selector]; + action_ste = &matcher->action_ste; rtc_0_id = &action_ste->rtc_0_id; rtc_1_id = &action_ste->rtc_1_id; ste_pool = action_ste->pool; ste = &action_ste->ste; + /* Action RTC size calculation: + * log((max number of rules in matcher) * + * (max number of action STEs per rule) * + * (2 to support writing new STEs for update rule)) + */ ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) + - attr->table.sz_row_log; + attr->table.sz_row_log + + MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT; rtc_attr.log_size = ste->order; rtc_attr.log_depth = 0; 
rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; @@ -318,8 +315,8 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false); /* STC is a single resource (obj_id), use any STC for the ID */ - stc_pool = ctx->stc_pool[tbl->type]; - default_stc = ctx->common_res[tbl->type].default_stc; + stc_pool = ctx->stc_pool; + default_stc = ctx->common_res.default_stc; obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); rtc_attr.stc_base = obj_id; @@ -358,8 +355,7 @@ free_ste: } static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher, - enum mlx5hws_matcher_rtc_type rtc_type, - u8 action_ste_selector) + enum mlx5hws_matcher_rtc_type rtc_type) { struct mlx5hws_matcher_action_ste *action_ste; struct mlx5hws_table *tbl = matcher->tbl; @@ -375,7 +371,7 @@ static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher, ste = &matcher->match_ste.ste; break; case HWS_MATCHER_RTC_TYPE_STE_ARRAY: - action_ste = &matcher->action_ste[action_ste_selector]; + action_ste = &matcher->action_ste; rtc_0_id = action_ste->rtc_0_id; rtc_1_id = action_ste->rtc_1_id; ste_pool = action_ste->pool; @@ -466,20 +462,13 @@ static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher) if (!resize_data) return -ENOMEM; - resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; - - resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc; - resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id; - resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id; - resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ? - src_matcher->action_ste[0].pool : - NULL; - resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc; - resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id; - resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id; - resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ? - src_matcher->action_ste[1].pool : - NULL; + resize_data->max_stes = src_matcher->action_ste.max_stes; + + resize_data->stc = src_matcher->action_ste.stc; + resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id; + resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id; + resize_data->pool = src_matcher->action_ste.max_stes ? 
+ src_matcher->action_ste.pool : NULL; /* Place the new resized matcher on the dst matcher's list */ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); @@ -512,49 +501,69 @@ static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher) if (resize_data->max_stes) { mlx5hws_action_free_single_stc(matcher->tbl->ctx, matcher->tbl->type, - &resize_data->action_ste[1].stc); - mlx5hws_action_free_single_stc(matcher->tbl->ctx, - matcher->tbl->type, - &resize_data->action_ste[0].stc); + &resize_data->stc); - if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->action_ste[1].rtc_1_id); - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->action_ste[0].rtc_1_id); - } - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->action_ste[1].rtc_0_id); + resize_data->rtc_1_id); + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->action_ste[0].rtc_0_id); - if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) { - mlx5hws_pool_destroy(resize_data->action_ste[1].pool); - mlx5hws_pool_destroy(resize_data->action_ste[0].pool); - } + resize_data->rtc_0_id); + + if (resize_data->pool) + mlx5hws_pool_destroy(resize_data->pool); } kfree(resize_data); } } -static int -hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) { + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; struct mlx5hws_matcher_action_ste *action_ste; struct mlx5hws_table *tbl = matcher->tbl; struct mlx5hws_pool_attr pool_attr = {0}; struct mlx5hws_context *ctx = tbl->ctx; - int ret; + u32 required_stes; + u8 max_stes = 0; + int i, ret; + + if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION) + return 0; + + for (i = 0; i < matcher->num_of_at; i++) { + struct mlx5hws_action_template *at = &matcher->at[i]; + + ret = hws_matcher_check_and_process_at(matcher, at); + if (ret) { + mlx5hws_err(ctx, "Invalid at %d", i); + return ret; + } + + required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); + max_stes = max(max_stes, required_stes); + + /* Future: Optimize reparse */ + } + + /* There are no additional STEs required for matcher */ + if (!max_stes) + return 0; + + matcher->action_ste.max_stes = max_stes; - action_ste = &matcher->action_ste[action_ste_selector]; + action_ste = &matcher->action_ste; /* Allocate action STE mempool */ pool_attr.table_type = tbl->type; pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; + /* Pool size is similar to action RTC size */ pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) + - matcher->attr.table.sz_row_log; + matcher->attr.table.sz_row_log + + MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT; hws_matcher_set_pool_attr(&pool_attr, matcher); action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); if (!action_ste->pool) { @@ -563,7 +572,7 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) } /* Allocate action RTC */ - ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); if (ret) { mlx5hws_err(ctx, "Failed to create action RTC\n"); goto free_ste_pool; @@ -587,18 +596,18 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) 
return 0; free_rtc: - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); free_ste_pool: mlx5hws_pool_destroy(action_ste->pool); return ret; } -static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher) { struct mlx5hws_matcher_action_ste *action_ste; struct mlx5hws_table *tbl = matcher->tbl; - action_ste = &matcher->action_ste[action_ste_selector]; + action_ste = &matcher->action_ste; if (!action_ste->max_stes || matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION || @@ -606,65 +615,10 @@ static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action return; mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc); - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); mlx5hws_pool_destroy(action_ste->pool); } -static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) -{ - bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); - struct mlx5hws_table *tbl = matcher->tbl; - struct mlx5hws_context *ctx = tbl->ctx; - u32 required_stes; - u8 max_stes = 0; - int i, ret; - - if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION) - return 0; - - for (i = 0; i < matcher->num_of_at; i++) { - struct mlx5hws_action_template *at = &matcher->at[i]; - - ret = hws_matcher_check_and_process_at(matcher, at); - if (ret) { - mlx5hws_err(ctx, "Invalid at %d", i); - return ret; - } - - required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); - max_stes = max(max_stes, required_stes); - - /* Future: Optimize reparse */ - } - - /* There are no additional STEs required for matcher */ - if (!max_stes) - return 0; - - matcher->action_ste[0].max_stes = max_stes; - matcher->action_ste[1].max_stes = max_stes; - - ret = hws_matcher_bind_at_idx(matcher, 0); - if (ret) - return ret; - - ret = hws_matcher_bind_at_idx(matcher, 1); - if (ret) - goto free_at_0; - - return 0; - -free_at_0: - hws_matcher_unbind_at_idx(matcher, 0); - return ret; -} - -static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher) -{ - hws_matcher_unbind_at_idx(matcher, 1); - hws_matcher_unbind_at_idx(matcher, 0); -} - static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher) { struct mlx5hws_context *ctx = matcher->tbl->ctx; @@ -810,7 +764,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) goto unbind_at; /* Allocate the RTC for the new matcher */ - ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); if (ret) goto destroy_end_ft; @@ -822,7 +776,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) return 0; destroy_rtc: - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); destroy_end_ft: hws_matcher_destroy_end_ft(matcher); unbind_at: @@ -836,7 +790,7 @@ static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher) { hws_matcher_resize_uninit(matcher); hws_matcher_disconnect(matcher); - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); hws_matcher_destroy_end_ft(matcher); hws_matcher_unbind_at(matcher); hws_matcher_unbind_mt(matcher); @@ -970,10 +924,9 @@ int 
mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, return ret; required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); - if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) { + if (matcher->action_ste.max_stes < required_stes) { mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n", - required_stes, - matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes); + required_stes, matcher->action_ste.max_stes); return -ENOMEM; } @@ -1007,9 +960,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher, if (!matcher->mt) return -ENOMEM; - matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach, - sizeof(*matcher->at), - GFP_KERNEL); + matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach, + sizeof(*matcher->at), + GFP_KERNEL); if (!matcher->at) { mlx5hws_err(ctx, "Failed to allocate action template array\n"); ret = -ENOMEM; @@ -1035,7 +988,7 @@ free_mt: static void hws_matcher_unset_templates(struct mlx5hws_matcher *matcher) { - kfree(matcher->at); + kvfree(matcher->at); kfree(matcher->mt); } @@ -1157,8 +1110,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher, return -EINVAL; } - if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes > - dst_matcher->action_ste[0].max_stes) { + if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) { mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h index 81ff487f57be..020de70270c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h @@ -18,6 +18,11 @@ /* Required depth of the main large table */ #define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2 +/* Action RTC size multiplier that is required in order + * to support rule update for rules with action STEs. 
+ */ +#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1 + enum mlx5hws_matcher_offset { MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12, MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13, @@ -52,15 +57,11 @@ struct mlx5hws_matcher_action_ste { u8 max_stes; }; -struct mlx5hws_matcher_resize_data_node { +struct mlx5hws_matcher_resize_data { struct mlx5hws_pool_chunk stc; u32 rtc_0_id; u32 rtc_1_id; struct mlx5hws_pool *pool; -}; - -struct mlx5hws_matcher_resize_data { - struct mlx5hws_matcher_resize_data_node action_ste[2]; u8 max_stes; struct list_head list_node; }; @@ -78,7 +79,7 @@ struct mlx5hws_matcher { struct mlx5hws_matcher *col_matcher; struct mlx5hws_matcher *resize_dst; struct mlx5hws_matcher_match_ste match_ste; - struct mlx5hws_matcher_action_ste action_ste[2]; + struct mlx5hws_matcher_action_ste action_ste; struct list_head list_node; struct list_head resize_data; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index f39d636ff39a..5121951f2778 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions { struct mlx5hws_context_attr { u16 queues; u16 queue_size; - bool bwc; /* add support for backward compatible API*/ }; struct mlx5hws_table_attr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c index 06db5e4726ae..d9dc4f2d0dc6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c @@ -344,7 +344,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue, mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); memset(wqe_ctrl, 0, wqe_len); mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len); - memcpy(wqe_arg, arg_data, wqe_len); + memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE); send_attr.id = arg_idx++; mlx5hws_send_engine_post_end(&ctrl, &send_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h index 27ca93385b08..8ddb51980044 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h @@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item { u8 *data; u16 num_of_actions; } mh_data; - u32 refcount; + u32 refcount; /* protected by pattern_cache lock */ struct list_head ptrn_list_node; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c index fed2d913f3b8..50a81d360bb2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c @@ -183,7 +183,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool, *seg = -1; /* Find the next free place from the buddy array */ - while (*seg == -1) { + while (*seg < 0) { for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { buddy = hws_pool_buddy_get_next_buddy(pool, i, order, @@ -194,7 +194,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool, } *seg = mlx5hws_buddy_alloc_mem(buddy, order); - if (*seg != -1) + if (*seg >= 0) goto found; if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h index de92cecbeb92..271490a51b96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h @@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits { u8 match_mask[0x160]; }; -struct mlx5_ifc_arg_bits { - u8 rsvd0[0x88]; - u8 access_pd[0x18]; -}; - struct mlx5_ifc_header_modify_pattern_in_bits { u8 modify_field_select[0x40]; @@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits { struct mlx5_ifc_definer_bits definer; }; -struct mlx5_ifc_create_arg_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; - struct mlx5_ifc_arg_bits arg; -}; - struct mlx5_ifc_create_header_modify_pattern_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_header_modify_pattern_in_bits pattern; @@ -479,36 +469,4 @@ enum { MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1, }; -struct mlx5_ifc_alloc_packet_reformat_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 packet_reformat_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_dealloc_packet_reformat_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 packet_reformat_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_dealloc_packet_reformat_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - #endif /* MLX5_PRM_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c index e20c67a04203..a27a2d5ffc7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c @@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue, static void hws_rule_save_resize_info(struct mlx5hws_rule *rule, - struct mlx5hws_send_ste_attr *ste_attr, - bool is_update) + struct mlx5hws_send_ste_attr *ste_attr) { if (!mlx5hws_matcher_is_resizable(rule->matcher)) return; - if (likely(!is_update)) { + /* resize_info might already exist (if we're in update flow) */ + if (likely(!rule->resize_info)) { rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL); if (unlikely(!rule->resize_info)) { pr_warn("HWS: resize info isn't allocated for rule\n"); return; } - - rule->resize_info->max_stes = - rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; - rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ? - rule->matcher->action_ste[0].pool : - NULL; - rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ? 
- rule->matcher->action_ste[1].pool : - NULL; } memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl, @@ -204,15 +195,14 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule, } } -static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule, - u8 action_ste_selector) +static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule) { struct mlx5hws_matcher *matcher = rule->matcher; struct mlx5hws_matcher_action_ste *action_ste; struct mlx5hws_pool_chunk ste = {0}; int ret; - action_ste = &matcher->action_ste[action_ste_selector]; + action_ste = &matcher->action_ste; ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes)); ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste); if (unlikely(ret)) { @@ -220,68 +210,29 @@ static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule, "Failed to allocate STE for rule actions"); return ret; } - rule->action_ste_idx = ste.offset; + + rule->action_ste.pool = matcher->action_ste.pool; + rule->action_ste.num_stes = matcher->action_ste.max_stes; + rule->action_ste.index = ste.offset; return 0; } -static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule, - u8 action_ste_selector) +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste) { - struct mlx5hws_matcher *matcher = rule->matcher; struct mlx5hws_pool_chunk ste = {0}; - struct mlx5hws_pool *pool; - u8 max_stes; - - if (mlx5hws_matcher_is_resizable(matcher)) { - /* Free the original action pool if rule was resized */ - max_stes = rule->resize_info->max_stes; - pool = rule->resize_info->action_ste_pool[action_ste_selector]; - } else { - max_stes = matcher->action_ste[action_ste_selector].max_stes; - pool = matcher->action_ste[action_ste_selector].pool; - } - - /* This release is safe only when the rule match part was deleted */ - ste.order = ilog2(roundup_pow_of_two(max_stes)); - ste.offset = rule->action_ste_idx; - - mlx5hws_pool_chunk_free(pool, &ste); -} -static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule, - struct mlx5hws_rule_attr *attr) -{ - int action_ste_idx; - int ret; - - ret = hws_rule_alloc_action_ste_idx(rule, 0); - if (unlikely(ret)) - return ret; - - action_ste_idx = rule->action_ste_idx; - - ret = hws_rule_alloc_action_ste_idx(rule, 1); - if (unlikely(ret)) { - hws_rule_free_action_ste_idx(rule, 0); - return ret; - } - - /* Both pools have to return the same index */ - if (unlikely(rule->action_ste_idx != action_ste_idx)) { - pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n"); - return -EINVAL; - } + if (!action_ste->num_stes) + return; - return 0; -} + ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes)); + ste.offset = action_ste->index; -void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule) -{ - if (rule->action_ste_idx > -1) { - hws_rule_free_action_ste_idx(rule, 1); - hws_rule_free_action_ste_idx(rule, 0); - } + /* This release is safe only when the rule match STE was deleted + * (when the rule is being deleted) or replaced with the new STE that + * isn't pointing to old action STEs (when the rule is being updated). 
+ */ + mlx5hws_pool_chunk_free(action_ste->pool, &ste); } static void hws_rule_create_init(struct mlx5hws_rule *rule, @@ -298,14 +249,24 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule, /* In update we use these rtc's */ rule->rtc_0 = 0; rule->rtc_1 = 0; - rule->action_ste_selector = 0; + + rule->action_ste.pool = NULL; + rule->action_ste.num_stes = 0; + rule->action_ste.index = -1; + + rule->status = MLX5HWS_RULE_STATUS_CREATING; } else { - rule->action_ste_selector = !rule->action_ste_selector; + rule->status = MLX5HWS_RULE_STATUS_UPDATING; } + /* Initialize the old action STE info - shallow-copy action_ste. + * In create flow this will set old_action_ste fields to initial values. + * In update flow this will save the existing action STE info, + * so that we will later use it to free old STEs. + */ + rule->old_action_ste = rule->action_ste; + rule->pending_wqes = 0; - rule->action_ste_idx = -1; - rule->status = MLX5HWS_RULE_STATUS_CREATING; /* Init default send STE attributes */ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; @@ -315,8 +276,8 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule, /* Init default action apply */ apply->tbl_type = tbl->type; - apply->common_res = &ctx->common_res[tbl->type]; - apply->jump_to_action_stc = matcher->action_ste[0].stc.offset; + apply->common_res = &ctx->common_res; + apply->jump_to_action_stc = matcher->action_ste.stc.offset; apply->require_dep = 0; } @@ -332,8 +293,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule, rule->rtc_1 = 0; rule->pending_wqes = 0; - rule->action_ste_idx = -1; - rule->action_ste_selector = 0; rule->status = MLX5HWS_RULE_STATUS_CREATING; rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING; } @@ -394,21 +353,17 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule, if (action_stes) { /* Allocate action STEs for rules that need more than match STE */ - if (!is_update) { - ret = hws_rule_alloc_action_ste(rule, attr); - if (ret) { - mlx5hws_err(ctx, "Failed to allocate action memory %d", ret); - mlx5hws_send_abort_new_dep_wqe(queue); - return ret; - } + ret = hws_rule_alloc_action_ste(rule); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate action memory %d", ret); + mlx5hws_send_abort_new_dep_wqe(queue); + return ret; } /* Skip RX/TX based on the dep_wqe init */ - ste_attr.rtc_0 = dep_wqe->rtc_0 ? - matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0; - ste_attr.rtc_1 = dep_wqe->rtc_1 ? - matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0; + ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0; + ste_attr.rtc_1 = dep_wqe->rtc_1 ? 
matcher->action_ste.rtc_1_id : 0; /* Action STEs are written to a specific index last to first */ - ste_attr.direct_index = rule->action_ste_idx + action_stes; + ste_attr.direct_index = rule->action_ste.index + action_stes; apply.next_direct_idx = ste_attr.direct_index; } else { apply.next_direct_idx = 0; @@ -459,7 +414,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule, if (!is_update) hws_rule_save_delete_info(rule, &ste_attr); - hws_rule_save_resize_info(rule, &ste_attr, is_update); + hws_rule_save_resize_info(rule, &ste_attr); mlx5hws_send_engine_inc_rule(queue); if (!attr->burst) @@ -480,7 +435,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule, attr->user_data, MLX5HWS_RULE_STATUS_DELETED); /* Rule failed now we can safely release action STEs */ - mlx5hws_rule_free_action_ste(rule); + mlx5hws_rule_free_action_ste(&rule->action_ste); + + /* Perhaps the rule failed updating - release old action STEs as well */ + mlx5hws_rule_free_action_ste(&rule->old_action_ste); /* Clear complex tag */ hws_rule_clear_delete_info(rule); @@ -517,7 +475,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule, } /* Rule is not completed yet */ - if (rule->status == MLX5HWS_RULE_STATUS_CREATING) + if (rule->status == MLX5HWS_RULE_STATUS_CREATING || + rule->status == MLX5HWS_RULE_STATUS_UPDATING) return -EBUSY; /* Rule failed and doesn't require cleanup */ @@ -534,7 +493,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule, hws_rule_gen_comp(queue, rule, false, attr->user_data, MLX5HWS_RULE_STATUS_DELETED); - mlx5hws_rule_free_action_ste(rule); + mlx5hws_rule_free_action_ste(&rule->action_ste); mlx5hws_rule_clear_resize_info(rule); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h index 495cdd17e9f3..b5ee94ac449b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h @@ -15,6 +15,8 @@ enum mlx5hws_rule_status { MLX5HWS_RULE_STATUS_UNKNOWN, MLX5HWS_RULE_STATUS_CREATING, MLX5HWS_RULE_STATUS_CREATED, + MLX5HWS_RULE_STATUS_UPDATING, + MLX5HWS_RULE_STATUS_UPDATED, MLX5HWS_RULE_STATUS_DELETING, MLX5HWS_RULE_STATUS_DELETED, MLX5HWS_RULE_STATUS_FAILING, @@ -41,13 +43,17 @@ struct mlx5hws_rule_match_tag { }; }; +struct mlx5hws_rule_action_ste_info { + struct mlx5hws_pool *pool; + int index; /* STE array index */ + u8 num_stes; +}; + struct mlx5hws_rule_resize_info { - struct mlx5hws_pool *action_ste_pool[2]; u32 rtc_0; u32 rtc_1; u32 rule_idx; u8 state; - u8 max_stes; u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */ }; @@ -58,18 +64,18 @@ struct mlx5hws_rule { struct mlx5hws_rule_match_tag tag; struct mlx5hws_rule_resize_info *resize_info; }; + struct mlx5hws_rule_action_ste_info action_ste; + struct mlx5hws_rule_action_ste_info old_action_ste; u32 rtc_0; /* The RTC into which the STE was inserted */ u32 rtc_1; /* The RTC into which the STE was inserted */ - int action_ste_idx; /* STE array index */ u8 status; /* enum mlx5hws_rule_status */ - u8 action_ste_selector; /* For rule update - which action STE is in use */ u8 pending_wqes; bool skip_delete; /* For complex rules - another rule with same tag * still exists, so don't actually delete this rule. 
*/ }; -void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule); +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste); int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, void *queue, void *user_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c index 883b4ed30892..cb6abc4ab7df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c @@ -377,17 +377,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue, *status = MLX5HWS_FLOW_OP_ERROR; } else { - /* Increase the status, this only works on good flow as the enum - * is arrange it away creating -> created -> deleting -> deleted + /* Increase the status, this only works on good flow as + * the enum is arranged this way: + * - creating -> created + * - updating -> updated + * - deleting -> deleted */ priv->rule->status++; *status = MLX5HWS_FLOW_OP_SUCCESS; - /* Rule was deleted now we can safely release action STEs - * and clear resize info - */ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) { - mlx5hws_rule_free_action_ste(priv->rule); + /* Rule was deleted, now we can safely release + * action STEs and clear resize info + */ + mlx5hws_rule_free_action_ste(&priv->rule->action_ste); mlx5hws_rule_clear_resize_info(priv->rule); + } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) { + /* Rule was updated, free the old action STEs */ + mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste); + /* Update completed - move the rule back to "created" */ + priv->rule->status = MLX5HWS_RULE_STATUS_CREATED; } } } @@ -633,6 +641,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn, MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, flush_in_error_en, 1); + MLX5_SET(sqc, sqc, non_wire, 1); ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME : MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; @@ -896,15 +905,18 @@ close_cq: return err; } -void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) +static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) { + if (!queue->num_entries) + return; /* this queue wasn't initialized */ + hws_send_ring_close(queue); kfree(queue->completed.entries); } -int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, - struct mlx5hws_send_engine *queue, - u16 queue_size) +static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size) { int err; @@ -1005,7 +1017,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, u16 queue_size) { int err = 0; - u32 i; + int i = 0; /* Open one extra queue for control path */ ctx->queues = queues + 1; @@ -1021,7 +1033,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, goto free_bwc_locks; } - for (i = 0; i < ctx->queues; i++) { + /* If native API isn't supported, skip the unused native queues: + * initialize BWC queues and control queue only. 
+ */ + if (!mlx5hws_context_native_supported(ctx)) + i = mlx5hws_bwc_get_queue_id(ctx, 0); + + for (; i < ctx->queues; i++) { err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size); if (err) goto close_send_queues; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h index b50825d6dc53..f833092235c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h @@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue); void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue); -void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue); - -int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, - struct mlx5hws_send_engine *queue, - u16 queue_size); - void mlx5hws_send_queues_close(struct mlx5hws_context *ctx); int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c index 9576e02d00c3..ab1297531232 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c @@ -37,6 +37,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl, } static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +__must_hold(&tbl->ctx->ctrl_lock) { struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; @@ -48,8 +49,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl) if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) return 0; - if (ctx->common_res[tbl_type].default_miss) { - ctx->common_res[tbl_type].default_miss->refcount++; + if (ctx->common_res.default_miss) { + ctx->common_res.default_miss->refcount++; return 0; } @@ -70,29 +71,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl) return -EINVAL; } - /* ctx->ctrl_lock must be held here */ - ctx->common_res[tbl_type].default_miss = default_miss; - ctx->common_res[tbl_type].default_miss->refcount++; + ctx->common_res.default_miss = default_miss; + ctx->common_res.default_miss->refcount++; return 0; } /* Called under ctx->ctrl_lock */ static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +__must_hold(&tbl->ctx->ctrl_lock) { struct mlx5hws_cmd_forward_tbl *default_miss; struct mlx5hws_context *ctx = tbl->ctx; - u8 tbl_type = tbl->type; if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) return; - default_miss = ctx->common_res[tbl_type].default_miss; + default_miss = ctx->common_res.default_miss; if (--default_miss->refcount) return; mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss); - ctx->common_res[tbl_type].default_miss = NULL; + ctx->common_res.default_miss = NULL; } static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id) @@ -478,15 +478,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl, if (old_miss_tbl) list_del_init(&tbl->default_miss.next); - old_miss_tbl = tbl->default_miss.miss_tbl; - if (old_miss_tbl) - list_del_init(&old_miss_tbl->default_miss.head); - if (miss_tbl) list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head); - mutex_unlock(&ctx->ctrl_lock); - return 0; out: mutex_unlock(&ctx->ctrl_lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c index 49f22cad92bf..60cb4527588a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c @@ -8,7 +8,7 @@ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ ((dmn)->info.caps.dmn_type##_sw_owner || \ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ - (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7)) + (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8)) bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c index 6fa06ba2d346..f57c84e5128b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c @@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, int inlen, err, eqn; void *cqc, *in; __be64 *pas; - int vector; u32 i; cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev); - err = mlx5_comp_eqn_get(mdev, vector, &eqn); + err = mlx5_comp_eqn_get(mdev, 0, &eqn); if (err) { kvfree(in); goto err_cqwq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c index e94fbb015efa..c8b8ff80c7c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c @@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps, + ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps, hw_ste_arr, attr, added_stes); } @@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps, + ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps, hw_ste_arr, attr, added_stes); } @@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) return mlx5dr_ste_get_ctx_v1(); else if (version == MLX5_STEERING_FORMAT_CONNECTX_7) return mlx5dr_ste_get_ctx_v2(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_8) + return mlx5dr_ste_get_ctx_v3(); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h index 54a6619c3ecb..5f409dc30aca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h @@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx { /* Actions */ u32 actions_caps; - void (*set_actions_rx)(struct mlx5dr_domain *dmn, + void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *hw_ste_arr, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); - void (*set_actions_tx)(struct mlx5dr_domain *dmn, + void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *hw_ste_arr, @@ -197,7 +199,17 
@@ struct mlx5dr_ste_ctx { u16 *used_hw_action_num); int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action); void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action); - + /* Actions bit set */ + void (*set_encap)(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, int size); + void (*set_push_vlan)(u8 *ste, u8 *d_action, + u32 vlan_hdr); + void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action, + u8 vlans_num); + void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action); + void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action, + u8 *scnd_d_action, u32 reformat_id, + int size); /* Send */ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); }; @@ -205,5 +217,6 @@ struct mlx5dr_ste_ctx { struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void); struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void); struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void); #endif /* _DR_STE_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c index e9f6c7ed7a7b..42536bee55e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c @@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste, } static void -dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, +dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, } static void -dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, +dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c index 1d49704b9542..7f83d77c43ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c @@ -5,136 +5,6 @@ #include "mlx5_ifc_dr_ste_v1.h" #include "dr_ste_v1.h" -#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ - ((inner) ? 
DR_STE_V1_LU_TYPE_##lookup_type##_I : \ - DR_STE_V1_LU_TYPE_##lookup_type##_O) - -enum dr_ste_v1_entry_format { - DR_STE_V1_TYPE_BWC_BYTE = 0x0, - DR_STE_V1_TYPE_BWC_DW = 0x1, - DR_STE_V1_TYPE_MATCH = 0x2, - DR_STE_V1_TYPE_MATCH_RANGES = 0x7, -}; - -/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */ -enum { - DR_STE_V1_LU_TYPE_NOP = 0x0000, - DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002, - DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102, - DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003, - DR_STE_V1_LU_TYPE_IBL4 = 0x0103, - DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004, - DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104, - DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005, - DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105, - DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006, - DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007, - DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008, - DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108, - DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009, - DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109, - DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a, - DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a, - DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b, - DR_STE_V1_LU_TYPE_MPLS_O = 0x010b, - DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c, - DR_STE_V1_LU_TYPE_MPLS_I = 0x010c, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d, - DR_STE_V1_LU_TYPE_GRE = 0x010d, - DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e, - DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, - DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, - DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, - DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011, - DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, - DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, - DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, - DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114, - DR_STE_V1_LU_TYPE_INVALID = 0x00ff, - DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, -}; - -enum dr_ste_v1_header_anchors { - DR_STE_HEADER_ANCHOR_START_OUTER = 0x00, - DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02, - DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07, - DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13, - DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, -}; - -enum dr_ste_v1_action_size { - DR_STE_ACTION_SINGLE_SZ = 4, - DR_STE_ACTION_DOUBLE_SZ = 8, - DR_STE_ACTION_TRIPLE_SZ = 12, -}; - -enum dr_ste_v1_action_insert_ptr_attr { - DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. 
push vlan) */ - DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */ - DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */ -}; - -enum dr_ste_v1_action_id { - DR_STE_V1_ACTION_ID_NOP = 0x00, - DR_STE_V1_ACTION_ID_COPY = 0x05, - DR_STE_V1_ACTION_ID_SET = 0x06, - DR_STE_V1_ACTION_ID_ADD = 0x07, - DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08, - DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09, - DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a, - DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b, - DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c, - DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, - DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, - DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, - DR_STE_V1_ACTION_ID_ASO = 0x12, - DR_STE_V1_ACTION_ID_TRAILER = 0x13, - DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, - DR_STE_V1_ACTION_ID_MAX = 0x21, - /* use for special cases */ - DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22, -}; - -enum { - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, - DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, - DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, - DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, - DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, - DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, - DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, - DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, - DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, - DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, - DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, - DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, - DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, - DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, -}; - -enum dr_ste_v1_aso_ctx_type { - DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2, -}; - static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, @@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id) MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id); } -static void dr_ste_v1_set_reparse(u8 *hw_ste_p) +void dr_ste_v1_set_reparse(u8 *hw_ste_p) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1); } -static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, - u32 reformat_id, int size) +void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size) { MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER); @@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, - u32 vlan_hdr) +void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr) { MLX5_SET(ste_double_action_insert_with_inline_v1, 
d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); @@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, - u8 *frst_s_action, - u8 *scnd_d_action, - u32 reformat_id, - int size) +void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, + u32 reformat_id, int size) { /* Remove L2 headers */ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id, @@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) +void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) { MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); @@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste, dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES); } -void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, +void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; @@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_push_vlan(last_ste, action, - attr->vlans.headers[i]); + ste_ctx->set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_encap(last_ste, action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap(last_ste, action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { @@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, } d_action = action + DR_STE_ACTION_SINGLE_SZ; - dr_ste_v1_set_encap_l3(last_ste, - action, d_action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap_l3(last_ste, + action, d_action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_TRIPLE_SZ; action += DR_STE_ACTION_TRIPLE_SZ; } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) { @@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, +void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -799,7 +666,7 @@ void 
dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, allow_modify_hdr = false; allow_ctr = false; } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) { - dr_ste_v1_set_rx_decap(last_ste, action); + ste_ctx->set_rx_decap(last_ste, action); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; allow_modify_hdr = false; @@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; allow_ctr = false; @@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_push_vlan(last_ste, action, - attr->vlans.headers[i]); + ste_ctx->set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_encap(last_ste, action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap(last_ste, action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; @@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, d_action = action + DR_STE_ACTION_SINGLE_SZ; - dr_ste_v1_set_encap_l3(last_ste, - action, d_action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap_l3(last_ste, + action, d_action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_TRIPLE_SZ; allow_modify_hdr = false; } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) { @@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action, MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter); } -#define DR_STE_DECAP_L3_ACTION_NUM 8 -#define DR_STE_L2_HDR_MAX_SZ 20 - int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, @@ -2330,7 +2194,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, - + /* Actions bit set */ + .set_encap = &dr_ste_v1_set_encap, + .set_push_vlan = &dr_ste_v1_set_push_vlan, + .set_pop_vlan = &dr_ste_v1_set_pop_vlan, + .set_rx_decap = &dr_ste_v1_set_rx_decap, + .set_encap_l3 = &dr_ste_v1_set_encap_l3, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h index e2fc69867088..a8d9e308d339 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h @@ -7,6 +7,138 @@ #include "dr_types.h" #include "dr_ste.h" +#define DR_STE_DECAP_L3_ACTION_NUM 8 +#define DR_STE_L2_HDR_MAX_SZ 20 +#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ + ((inner) ? 
DR_STE_V1_LU_TYPE_##lookup_type##_I : \ + DR_STE_V1_LU_TYPE_##lookup_type##_O) + +enum dr_ste_v1_entry_format { + DR_STE_V1_TYPE_BWC_BYTE = 0x0, + DR_STE_V1_TYPE_BWC_DW = 0x1, + DR_STE_V1_TYPE_MATCH = 0x2, + DR_STE_V1_TYPE_MATCH_RANGES = 0x7, +}; + +/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */ +enum { + DR_STE_V1_LU_TYPE_NOP = 0x0000, + DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002, + DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102, + DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003, + DR_STE_V1_LU_TYPE_IBL4 = 0x0103, + DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004, + DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104, + DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105, + DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007, + DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008, + DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108, + DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009, + DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109, + DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a, + DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b, + DR_STE_V1_LU_TYPE_MPLS_O = 0x010b, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c, + DR_STE_V1_LU_TYPE_MPLS_I = 0x010c, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d, + DR_STE_V1_LU_TYPE_GRE = 0x010d, + DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e, + DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, + DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011, + DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, + DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, + DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, + DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114, + DR_STE_V1_LU_TYPE_INVALID = 0x00ff, + DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, +}; + +enum dr_ste_v1_header_anchors { + DR_STE_HEADER_ANCHOR_START_OUTER = 0x00, + DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02, + DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07, + DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13, + DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, +}; + +enum dr_ste_v1_action_size { + DR_STE_ACTION_SINGLE_SZ = 4, + DR_STE_ACTION_DOUBLE_SZ = 8, + DR_STE_ACTION_TRIPLE_SZ = 12, +}; + +enum dr_ste_v1_action_insert_ptr_attr { + DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. 
push vlan) */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */ +}; + +enum dr_ste_v1_action_id { + DR_STE_V1_ACTION_ID_NOP = 0x00, + DR_STE_V1_ACTION_ID_COPY = 0x05, + DR_STE_V1_ACTION_ID_SET = 0x06, + DR_STE_V1_ACTION_ID_ADD = 0x07, + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09, + DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a, + DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b, + DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c, + DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, + DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, + DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, + DR_STE_V1_ACTION_ID_ASO = 0x12, + DR_STE_V1_ACTION_ID_TRAILER = 0x13, + DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, + DR_STE_V1_ACTION_ID_MAX = 0x21, + /* use for special cases */ + DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22, +}; + +enum { + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, +}; + +enum dr_ste_v1_aso_ctx_type { + DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2, +}; + bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p); void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr); u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p); @@ -17,11 +149,18 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p); void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi); void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size); -void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, - u32 actions_caps, u8 *last_ste, +void dr_ste_v1_set_reparse(u8 *hw_ste_p); +void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size); +void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr); +void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num); +void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, + u32 reformat_id, int size); +void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action); +void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn, + u8 *action_type_set, u32 
actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); -void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, - u32 actions_caps, u8 *last_ste, +void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn, + u8 *action_type_set, u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter, u8 length, u32 data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c index 808b013cf48c..0882dba0f64b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c @@ -2,167 +2,7 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ #include "dr_ste_v1.h" - -enum { - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, - DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, - DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, - DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, - DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, - DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, - DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, - DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, - DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, - DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, - DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, - DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, - DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, - DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, -}; - -static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { - [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, - .l4_type = 
DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { - .hw_field = 
DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, - }, -}; +#include "dr_ste_v2.h" static struct mlx5dr_ste_ctx ste_ctx_v2 = { /* Builders */ @@ -223,7 +63,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = { .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, - + /* Actions bit set */ + .set_encap = &dr_ste_v1_set_encap, + .set_push_vlan = &dr_ste_v1_set_push_vlan, + .set_pop_vlan = &dr_ste_v1_set_pop_vlan, + .set_rx_decap = &dr_ste_v1_set_rx_decap, + .set_encap_l3 = &dr_ste_v1_set_encap_l3, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h new file mode 100644 index 000000000000..d853fde49cfc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef _DR_STE_V2_ +#define _DR_STE_V2_ + +enum { + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + 
.hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = 
DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, + }, +}; + +#endif /* _DR_STE_V2_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c new file mode 100644 index 000000000000..cc60ce1d274e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include "dr_ste_v1.h" +#include "dr_ste_v2.h" + +static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, int size) +{ + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_POINTER); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2); + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id); + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes, + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP); + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action, + u32 vlan_hdr) +{ + MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_INLINE); + /* The hardware expects here offset to vlan header in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset, + HDR_LEN_L2_MACS >> 1); + MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr); + dr_ste_v1_set_reparse(ste); +} + +static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, + u8 vlans_num) +{ + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p, + u8 *frst_s_action, + u8 *scnd_d_action, + u32 reformat_id, + int size) +{ + /* Remove L2 headers */ + MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor, + DR_STE_HEADER_ANCHOR_IPV6_IPV4); + + /* Encapsulate with given reformat ID */ + MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_POINTER); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2); + MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id); + MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes, + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action) +{ + MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1); + MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1); + MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor, + DR_STE_HEADER_ANCHOR_INNER_MAC); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static int +dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz, + u8 *hw_action, u32 hw_action_sz, + uint16_t *used_hw_action_num) +{ + u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {}; + void *data_ptr = padded_data; + u16 used_actions = 0; + u32 inline_data_sz; + u32 i; + + if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM) + return -EINVAL; + + inline_data_sz = + MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data); + + /* Add an alignment padding */ + memcpy(padded_data + 
data_sz % inline_data_sz, data, data_sz); + + /* Remove L2L3 outer headers */ + MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1); + MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1); + MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor, + DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4); + hw_action += DR_STE_ACTION_DOUBLE_SZ; + used_actions++; /* Remove and NOP are a single double action */ + + /* Point to the last dword of the header */ + data_ptr += (data_sz / inline_data_sz) * inline_data_sz; + + /* Add the new header using inline action 4Byte at a time, the header + * is added in reversed order to the beginning of the packet to avoid + * incorrect parsing by the HW. Since header is 14B or 18B an extra + * two bytes are padded and later removed. + */ + for (i = 0; i < data_sz / inline_data_sz + 1; i++) { + void *addr_inline; + + MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_INLINE); + /* The hardware expects here offset to words (2 bytes) */ + MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0); + + /* Copy bytes one by one to avoid endianness problem */ + addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3, + hw_action, inline_data); + memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz); + hw_action += DR_STE_ACTION_DOUBLE_SZ; + used_actions++; + } + + /* Remove first 2 extra bytes */ + MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0); + /* The hardware expects here size in words (2 bytes) */ + MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1); + used_actions++; + + *used_hw_action_num = used_actions; + + return 0; +} + +static struct mlx5dr_ste_ctx ste_ctx_v3 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v1_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, + .build_icmp_init = &dr_ste_v1_build_icmp_init, + .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, + .build_tnl_geneve_tlv_opt_exist_init = + &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init, + .build_register_0_init = 
&dr_ste_v1_build_register_0_init, + .build_register_1_init = &dr_ste_v1_build_register_1_init, + .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, + + /* Getters and Setters */ + .ste_init = &dr_ste_v1_init, + .set_next_lu_type = &dr_ste_v1_set_next_lu_type, + .get_next_lu_type = &dr_ste_v1_get_next_lu_type, + .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set, + .set_miss_addr = &dr_ste_v1_set_miss_addr, + .get_miss_addr = &dr_ste_v1_get_miss_addr, + .set_hit_addr = &dr_ste_v1_set_hit_addr, + .set_byte_mask = &dr_ste_v1_set_byte_mask, + .get_byte_mask = &dr_ste_v1_get_byte_mask, + + /* Actions */ + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .set_actions_rx = &dr_ste_v1_set_actions_rx, + .set_actions_tx = &dr_ste_v1_set_actions_tx, + .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr), + .modify_field_arr = dr_ste_v2_action_modify_field_arr, + .set_action_set = &dr_ste_v1_set_action_set, + .set_action_add = &dr_ste_v1_set_action_add, + .set_action_copy = &dr_ste_v1_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list, + .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, + .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, + /* Actions bit set */ + .set_encap = &dr_ste_v3_set_encap, + .set_push_vlan = &dr_ste_v3_set_push_vlan, + .set_pop_vlan = &dr_ste_v3_set_pop_vlan, + .set_rx_decap = &dr_ste_v3_set_rx_decap, + .set_encap_l3 = &dr_ste_v3_set_encap_l3, + /* Send */ + .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, +}; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void) +{ + return &ste_ctx_v3; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c index 4b349d4005e4..8007d3f523c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c @@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } - id = dst->dest_attr.counter_id; + id = mlx5_fc_id(dst->dest_attr.counter); tmp_action = mlx5dr_action_create_flow_counter(id); if (!tmp_action) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h index fb078fa0f0cc..898c3618ff26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h @@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits { }; }; +struct mlx5_ifc_ste_single_action_remove_header_v3_bits { + u8 action_id[0x8]; + u8 start_anchor[0x7]; + u8 end_anchor[0x7]; + u8 reserved_at_16[0x1]; + u8 outer_l4_remove[0x1]; + u8 reserved_at_18[0x4]; + u8 decap[0x1]; + u8 vni_to_cqe[0x1]; + u8 qos_profile[0x2]; +}; + +struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits { + u8 action_id[0x8]; + u8 start_anchor[0x7]; + u8 start_offset[0x8]; + u8 outer_l4_remove[0x1]; + u8 
reserved_at_18[0x2]; + u8 remove_size[0x6]; +}; + +struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits { + u8 action_id[0x8]; + u8 start_anchor[0x7]; + u8 start_offset[0x8]; + u8 reserved_at_17[0x9]; + + u8 inline_data[0x20]; +}; + +struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits { + u8 action_id[0x8]; + u8 start_anchor[0x7]; + u8 start_offset[0x8]; + u8 size[0x6]; + u8 attributes[0x3]; + + u8 pointer[0x20]; +}; + #endif /* MLX5_IFC_DR_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h index 3ac7dc67509f..0bb3724c10c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h @@ -160,7 +160,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && (MLX5_CAP_GEN(dev, steering_format_version) <= - MLX5_STEERING_FORMAT_CONNECTX_7))); + MLX5_STEERING_FORMAT_CONNECTX_8))); } /* buddy functions & structure */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c index 1bed75eca97d..740b719e7072 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c @@ -382,6 +382,7 @@ err_alloc_bfreg: bool mlx5_wc_support_get(struct mlx5_core_dev *mdev) { + struct mutex *wc_state_lock = &mdev->wc_state_lock; struct mlx5_core_dev *parent = NULL; if (!MLX5_CAP_GEN(mdev, bf)) { @@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev) */ goto out; - mutex_lock(&mdev->wc_state_lock); - - if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) - goto unlock; - #ifdef CONFIG_MLX5_SF - if (mlx5_core_is_sf(mdev)) + if (mlx5_core_is_sf(mdev)) { parent = mdev->priv.parent_mdev; + wc_state_lock = &parent->wc_state_lock; + } #endif - if (parent) { - mutex_lock(&parent->wc_state_lock); + mutex_lock(wc_state_lock); + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + goto unlock; + + if (parent) { mlx5_core_test_wc(parent); mlx5_core_dbg(mdev, "parent set wc_state=%d\n", parent->wc_state); mdev->wc_state = parent->wc_state; - mutex_unlock(&parent->wc_state_lock); + } else { + mlx5_core_test_wc(mdev); } - mlx5_core_test_wc(mdev); - unlock: - mutex_unlock(&mdev->wc_state_lock); + mutex_unlock(wc_state_lock); out: mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state); diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 46245e0b2462..43c84900369a 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -14,7 +14,6 @@ #define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000 #define MLXFW_FSM_STATE_WAIT_ROUNDS \ (MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS) -#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20)) static const int mlxfw_fsm_state_errno[] = { [MLXFW_FSM_STATE_ERR_ERROR] = -EIO, @@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev, return err; } - comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE); if (comp->data_size > comp_max_size) { MLXFW_ERR_MSG(mlxfw_dev, extack, "Component size is bigger than limit", -EINVAL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 4a79c0d7e7ad..2bb2b77351bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -35,6 +35,7 @@ #include "reg.h" #include "resources.h" #include "../mlxfw/mlxfw.h" +#include "txheader.h" static LIST_HEAD(mlxsw_core_driver_list); static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); @@ -677,7 +678,7 @@ struct mlxsw_reg_trans { struct list_head bulk_list; struct mlxsw_core *core; struct sk_buff *tx_skb; - struct mlxsw_tx_info tx_info; + struct mlxsw_txhdr_info txhdr_info; struct delayed_work timeout_dw; unsigned int retries; u64 tid; @@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, if (!skb) return -ENOMEM; - trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, - skb->data + mlxsw_core->driver->txhdr_len, - skb->len - mlxsw_core->driver->txhdr_len); + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data, + skb->len); atomic_set(&trans->active, 1); - err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); + err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info); if (err) { dev_kfree_skb(skb); return err; @@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * - sizeof(u32) + mlxsw_core->driver->txhdr_len); + sizeof(u32) + MLXSW_TXHDR_LEN); if (mlxsw_core->emad.enable_string_tlv) emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); if (mlxsw_core->emad.enable_latency_tlv) @@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, list_add_tail(&trans->bulk_list, bulk_list); trans->core = mlxsw_core; trans->tx_skb = skb; - trans->tx_info.local_port = MLXSW_PORT_CPU_PORT; - trans->tx_info.is_emad = true; + trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT; + trans->txhdr_info.tx_info.is_emad = true; INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); trans->tid = tid; init_completion(&trans->completion); @@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, trans->type = type; mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid); - mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); spin_lock_bh(&mlxsw_core->emad.trans_list_lock); list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list); @@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) + const struct mlxsw_txhdr_info *txhdr_info) { return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, - tx_info); + txhdr_info); } EXPORT_SYMBOL(mlxsw_core_skb_transmit); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 6d11225594dd..1a871397a6df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -72,7 +72,14 @@ struct mlxsw_tx_info { bool is_emad; }; +struct mlxsw_txhdr_info { + struct mlxsw_tx_info tx_info; + bool data; + u16 max_fid; /* Used for PTP packets which are sent as data. 
*/ +}; + struct mlxsw_rx_md_info { + struct napi_struct *napi; u32 cookie_index; u32 latency; u32 tx_congestion; @@ -94,7 +101,7 @@ struct mlxsw_rx_md_info { bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info); int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); + const struct mlxsw_txhdr_info *txhdr_info); void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, u16 local_port); @@ -425,8 +432,6 @@ struct mlxsw_driver { int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer, u64 *p_drops); - void (*txhdr_construct)(struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); int (*resources_register)(struct mlxsw_core *mlxsw_core); int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, @@ -439,7 +444,6 @@ struct mlxsw_driver { void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, u16 local_port); - u8 txhdr_len; const struct mlxsw_config_profile *profile; bool sdq_supports_cqe_v2; }; @@ -486,7 +490,7 @@ struct mlxsw_bus { bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); + const struct mlxsw_txhdr_info *txhdr_info); int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 1e150ce1c73a..f9f565c1036d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv, } static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) + const struct mlxsw_txhdr_info *txhdr_info) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index d6f37456fb31..5b44c931b660 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -21,6 +21,7 @@ #include "cmd.h" #include "port.h" #include "resources.h" +#include "txheader.h" #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) @@ -737,6 +738,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe) } static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, + struct napi_struct *napi, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, enum mlxsw_pci_cqe_v cqe_v, char *cqe) @@ -807,6 +809,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, } mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe); + mlxsw_skb_cb(skb)->rx_md_info.napi = napi; mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); @@ -869,7 +872,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget) continue; } - mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq, wqe_counter, q->u.cq.v, cqe); if (++work_done == budget) @@ -2093,6 +2096,39 @@ static void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_free_irq_vectors(mlxsw_pci); } +static int mlxsw_pci_txhdr_construct(struct sk_buff *skb, + const struct mlxsw_txhdr_info *txhdr_info) +{ + const struct mlxsw_tx_info tx_info = txhdr_info->tx_info; + char *txhdr; + + if 
(skb_cow_head(skb, MLXSW_TXHDR_LEN)) + return -ENOMEM; + + txhdr = skb_push(skb, MLXSW_TXHDR_LEN); + memset(txhdr, 0, MLXSW_TXHDR_LEN); + + mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); + mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); + mlxsw_tx_hdr_swid_set(txhdr, 0); + + if (unlikely(txhdr_info->data)) { + u16 fid = txhdr_info->max_fid + tx_info.local_port - 1; + + mlxsw_tx_hdr_rx_is_router_set(txhdr, true); + mlxsw_tx_hdr_fid_valid_set(txhdr, true); + mlxsw_tx_hdr_fid_set(txhdr, fid); + mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA); + } else { + mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); + mlxsw_tx_hdr_control_tclass_set(txhdr, 1); + mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port); + mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); + } + + return 0; +} + static struct mlxsw_pci_queue * mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, const struct mlxsw_tx_info *tx_info) @@ -2120,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv, } static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) + const struct mlxsw_txhdr_info *txhdr_info) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct mlxsw_pci_queue *q; @@ -2129,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, int i; int err; + err = mlxsw_pci_txhdr_construct(skb, txhdr_info); + if (err) + return err; + if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) { err = skb_linearize(skb); if (err) return err; } - q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); + q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info); spin_lock_bh(&q->lock); elem_info = mlxsw_pci_queue_elem_info_producer_get(q); if (!elem_info) { @@ -2143,7 +2183,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, err = -EAGAIN; goto unlock; } - mlxsw_skb_cb(skb)->tx_info = *tx_info; + mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info; elem_info->sdq.skb = skb; wqe = elem_info->elem; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3f5e5d99251b..d714311fd884 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 }; -/* tx_hdr_version - * Tx header version. - * Must be set to 1. - */ -MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); - -/* tx_hdr_ctl - * Packet control type. - * 0 - Ethernet control (e.g. EMADs, LACP) - * 1 - Ethernet data - */ -MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); - -/* tx_hdr_proto - * Packet protocol type. Must be set to 1 (Ethernet). - */ -MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); - -/* tx_hdr_rx_is_router - * Packet is sent from the router. Valid for data packets only. - */ -MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); - -/* tx_hdr_fid_valid - * Indicates if the 'fid' field is valid and should be used for - * forwarding lookup. Valid for data packets only. - */ -MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); - -/* tx_hdr_swid - * Switch partition ID. Must be set to 0. - */ -MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); - -/* tx_hdr_control_tclass - * Indicates if the packet should use the control TClass and not one - * of the data TClasses. - */ -MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); - -/* tx_hdr_etclass - * Egress TClass to be used on the egress device on the egress port. 
- */ -MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); - -/* tx_hdr_port_mid - * Destination local port for unicast packets. - * Destination multicast ID for multicast packets. - * - * Control packets are directed to a specific egress port, while data - * packets are transmitted through the CPU port (0) into the switch partition, - * where forwarding rules are applied. - */ -MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); - -/* tx_hdr_fid - * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is - * set, otherwise calculated based on the packet's VID using VID to FID mapping. - * Valid for data packets only. - */ -MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16); - -/* tx_hdr_type - * 0 - Data packets - * 6 - Control packets - */ -MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); - int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, unsigned int counter_index, bool clear, u64 *packets, u64 *bytes) @@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, counter_index); } -void mlxsw_sp_txhdr_construct(struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); - - memset(txhdr, 0, MLXSW_TXHDR_LEN); - - mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); - mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); - mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); - mlxsw_tx_hdr_swid_set(txhdr, 0); - mlxsw_tx_hdr_control_tclass_set(txhdr, 1); - mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); - mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); -} - -int -mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - char *txhdr; - u16 max_fid; - int err; - - if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { - err = -ENOMEM; - goto err_skb_cow_head; - } - - if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) { - err = -EIO; - goto err_res_valid; - } - max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID); - - txhdr = skb_push(skb, MLXSW_TXHDR_LEN); - memset(txhdr, 0, MLXSW_TXHDR_LEN); - - mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); - mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); - mlxsw_tx_hdr_rx_is_router_set(txhdr, true); - mlxsw_tx_hdr_fid_valid_set(txhdr, true); - mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1); - mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA); - return 0; - -err_res_valid: -err_skb_cow_head: - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb); - return err; -} - static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb) { unsigned int type; @@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb) return !!ptp_parse_header(skb, type); } -static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) +static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb, + struct mlxsw_txhdr_info *txhdr_info) { - struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + /* Resource validation was done as part of PTP init. */ + u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID); + + txhdr_info->data = true; + txhdr_info->max_fid = max_fid; +} - /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp - * need special handling and cannot be transmitted as regular control - * packets. 
+static struct sk_buff * +mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb) +{ + /* In some Spectrum ASICs, in order for PTP event packets to have their + * correction field correctly set on the egress port they must be + * transmitted as data packets. Such packets ingress the ASIC via the + * CPU port and must have a VLAN tag, as the CPU port is not configured + * with a PVID. Push the default VLAN (4095), which is configured as + * egress untagged on all the ports. */ - if (unlikely(mlxsw_sp_skb_requires_ts(skb))) - return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core, - mlxsw_sp_port, skb, - tx_info); + if (skb_vlan_tagged(skb)) + return skb; - if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb); - return -ENOMEM; - } + return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + MLXSW_SP_DEFAULT_VID); +} - mlxsw_sp_txhdr_construct(skb, tx_info); - return 0; +static struct sk_buff * +mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, + struct mlxsw_txhdr_info *txhdr_info) +{ + if (likely(!mlxsw_sp_skb_requires_ts(skb))) + return skb; + + if (!mlxsw_sp->ptp_ops->tx_as_data) + return skb; + + /* Special handling for PTP events that require a time stamp and cannot + * be transmitted as regular control packets. + */ + mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info); + return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb); } enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) @@ -721,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_pcpu_stats *pcpu_stats; - const struct mlxsw_tx_info tx_info = { - .local_port = mlxsw_sp_port->local_port, - .is_emad = false, + struct mlxsw_txhdr_info txhdr_info = { + .tx_info.local_port = mlxsw_sp_port->local_port, + .tx_info.is_emad = false, }; u64 len; int err; memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); - if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) + if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info)) return NETDEV_TX_BUSY; if (eth_skb_pad(skb)) { @@ -738,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb, - &tx_info); - if (err) + skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info); + if (!skb) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; + } /* TX header is consumed by HW on the way so we shouldn't count its * bytes as being sent. @@ -751,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. 
*/ - err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); @@ -2449,7 +2346,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, u64_stats_update_end(&pcpu_stats->syncp); skb->protocol = eth_type_trans(skb, skb->dev); - netif_receive_skb(skb); + napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb); } static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, @@ -2792,7 +2689,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { .get_stats_count = mlxsw_sp1_get_stats_count, .get_stats_strings = mlxsw_sp1_get_stats_strings, .get_stats = mlxsw_sp1_get_stats, - .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, }; static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { @@ -2811,7 +2707,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .get_stats_count = mlxsw_sp2_get_stats_count, .get_stats_strings = mlxsw_sp2_get_stats_strings, .get_stats = mlxsw_sp2_get_stats, - .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct, + .tx_as_data = true, }; static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { @@ -2830,7 +2726,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { .get_stats_count = mlxsw_sp2_get_stats_count, .get_stats_strings = mlxsw_sp2_get_stats_strings, .get_stats = mlxsw_sp2_get_stats, - .txhdr_construct = mlxsw_sp_ptp_txhdr_construct, }; struct mlxsw_sp_sample_trigger_node { @@ -3992,11 +3887,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = { .trap_policer_fini = mlxsw_sp_trap_policer_fini, .trap_policer_set = mlxsw_sp_trap_policer_set, .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, - .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .ptp_transmitted = mlxsw_sp_ptp_transmitted, - .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp1_config_profile, .sdq_supports_cqe_v2 = false, }; @@ -4030,10 +3923,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .trap_policer_fini = mlxsw_sp_trap_policer_fini, .trap_policer_set = mlxsw_sp_trap_policer_set, .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, - .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, .ptp_transmitted = mlxsw_sp_ptp_transmitted, - .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, .sdq_supports_cqe_v2 = true, }; @@ -4067,10 +3958,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = { .trap_policer_fini = mlxsw_sp_trap_policer_fini, .trap_policer_set = mlxsw_sp_trap_policer_set, .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, - .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, .ptp_transmitted = mlxsw_sp_ptp_transmitted, - .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, .sdq_supports_cqe_v2 = true, }; @@ -4102,10 +3991,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = { .trap_policer_fini = mlxsw_sp_trap_policer_fini, .trap_policer_set = mlxsw_sp_trap_policer_set, .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, - .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, .ptp_transmitted = mlxsw_sp_ptp_transmitted, - .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp4_config_profile, .sdq_supports_cqe_v2 = true, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8d3c61287696..b10f80fc651b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -243,10 +243,7 @@ struct mlxsw_sp_ptp_ops { void (*get_stats_strings)(u8 **p); void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, u64 *data, int data_index); - int (*txhdr_construct)(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); + bool tx_as_data; }; struct mlxsw_sp_fid_core_ops { @@ -711,12 +708,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int *p_counter_index); void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, unsigned int counter_index); -void mlxsw_sp_txhdr_construct(struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); -int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 2bed8c86b7cf..3f64cdbabfa3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); if (err) return; - mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); + err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); + if (err) + return; for (i = 0; i < len; i++) { data[data_index + i] = hw_stats[i].getter(ppcnt_pl); if (!hw_stats[i].cells_bytes) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index f07955b5439f..6a4a81c63451 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (sample_act_count) { + NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported"); + return -EOPNOTSUPP; + } + err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, block, out_dev, extack); @@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (mirror_act_count) { + NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported"); + return -EOPNOTSUPP; + } + err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei, block, act->sample.psample_group, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 69cd689dbc83..5afe6b155ef0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp, mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets, &bytes); - if (mr_route->mfc->mfc_un.res.pkt != packets) - mr_route->mfc->mfc_un.res.lastuse = jiffies; - mr_route->mfc->mfc_un.res.pkt = packets; - mr_route->mfc->mfc_un.res.bytes = bytes; + if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != 
packets) + WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies); + atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets); + atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes); } static void mlxsw_sp_mr_stats_update(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index d94081c7658e..ca8b9d18fbb9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -1353,6 +1353,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp2_ptp_state *ptp_state; int err; + /* Max FID will be used in data path, check validity as part of init. */ + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID)) + return ERR_PTR(-EIO); + ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL); if (!ptp_state) return ERR_PTR(-ENOMEM); @@ -1679,43 +1683,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, return 0; } - -int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb); - return -ENOMEM; - } - - mlxsw_sp_txhdr_construct(skb, tx_info); - return 0; -} - -int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have - * their correction field correctly set on the egress port they must be - * transmitted as data packets. Such packets ingress the ASIC via the - * CPU port and must have a VLAN tag, as the CPU port is not configured - * with a PVID. Push the default VLAN (4095), which is configured as - * egress untagged on all the ports. 
- */ - if (!skb_vlan_tagged(skb)) { - skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), - MLXSW_SP_DEFAULT_VID); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - return -ENOMEM; - } - } - - return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb, - tx_info); -} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index c8aa1452fbb9..102db9060135 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -49,11 +49,6 @@ void mlxsw_sp1_get_stats_strings(u8 **p); void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, u64 *data, int data_index); -int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); - struct mlxsw_sp_ptp_clock * mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev); @@ -78,11 +73,6 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, struct kernel_ethtool_ts_info *info); -int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info); - #else static inline struct mlxsw_sp_ptp_clock * @@ -157,15 +147,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, { } -static inline int -mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - return -EOPNOTSUPP; -} - static inline struct mlxsw_sp_ptp_clock * mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev) { @@ -211,15 +192,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, { return -EOPNOTSUPP; } - -static inline int -mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, - struct mlxsw_sp_port *mlxsw_sp_port, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - return -EOPNOTSUPP; -} #endif static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 4b5fd71c897d..32d2e61f2b82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, parms = mlxsw_sp_ipip_netdev_parms4(to_dev); ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, - 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0, - 0); + 0, 0, tun->net, parms.link, tun->fwmark, 0, 0); rt = ip_route_output_key(tun->net, &fl4); if (IS_ERR(rt)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 899c954e0e5f..1f9c1c86839f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port, if (err) return; - netif_receive_skb(skb); + napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb); } static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h index da51dd9d5e44..e78cba5821b6 
100644 --- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -4,6 +4,69 @@ #ifndef _MLXSW_TXHEADER_H #define _MLXSW_TXHEADER_H +/* tx_hdr_version + * Tx header version. + * Must be set to 1. + */ +MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); + +/* tx_hdr_ctl + * Packet control type. + * 0 - Ethernet control (e.g. EMADs, LACP) + * 1 - Ethernet data + */ +MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); + +/* tx_hdr_proto + * Packet protocol type. Must be set to 1 (Ethernet). + */ +MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); + +/* tx_hdr_rx_is_router + * Packet is sent from the router. Valid for data packets only. + */ +MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); + +/* tx_hdr_fid_valid + * Indicates if the 'fid' field is valid and should be used for + * forwarding lookup. Valid for data packets only. + */ +MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); + +/* tx_hdr_swid + * Switch partition ID. Must be set to 0. + */ +MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); + +/* tx_hdr_control_tclass + * Indicates if the packet should use the control TClass and not one + * of the data TClasses. + */ +MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); + +/* tx_hdr_port_mid + * Destination local port for unicast packets. + * Destination multicast ID for multicast packets. + * + * Control packets are directed to a specific egress port, while data + * packets are transmitted through the CPU port (0) into the switch partition, + * where forwarding rules are applied. + */ +MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); + +/* tx_hdr_fid + * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is + * set, otherwise calculated based on the packet's VID using VID to FID mapping. + * Valid for data packets only. 
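Together with the 'fid' and 'type' fields defined just below, these MLXSW_ITEM32() definitions describe the 16-byte header that software prepends to every packet it hands to the device. MLXSW_ITEM32() generates mlxsw_tx_hdr_<field>_set() style accessors, so a control packet directed at a specific local port could be assembled roughly as in the sketch below. This is an illustration only, not part of the patch: the accessor names are assumed from the usual MLXSW_ITEM32() naming convention, and the raw field values come from the descriptions above (ctl 0 for Ethernet control, proto 1 for Ethernet, type 6 for control packets).

static void example_txhdr_construct(struct sk_buff *skb, u16 local_port)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        memset(txhdr, 0, MLXSW_TXHDR_LEN);
        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, 0);                 /* Ethernet control */
        mlxsw_tx_hdr_proto_set(txhdr, 1);               /* Ethernet */
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);      /* use control TClass */
        mlxsw_tx_hdr_port_mid_set(txhdr, local_port);   /* egress port */
        mlxsw_tx_hdr_type_set(txhdr, 6);                /* control packet */
}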
+ */ +MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16); + +/* tx_hdr_type + * 0 - Data packets + * 6 - Control packets + */ +MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); + #define MLXSW_TXHDR_LEN 0x10 #define MLXSW_TXHDR_VERSION_0 0 #define MLXSW_TXHDR_VERSION_1 1 diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 706ae6104c8e..14751f16e125 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -16,6 +16,10 @@ #include "fbnic_mac.h" #include "fbnic_rpc.h" +struct fbnic_napi_vector; + +#define FBNIC_MAX_NAPI_VECTORS 128u + struct fbnic_dev { struct device *dev; struct net_device *netdev; @@ -29,6 +33,11 @@ struct fbnic_dev { unsigned int pcs_msix_vector; unsigned short num_irqs; + struct { + u8 users; + char name[IFNAMSIZ + 9]; + } napi_irq[FBNIC_MAX_NAPI_VECTORS]; + struct delayed_work service_task; struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; @@ -148,6 +157,12 @@ void fbnic_hwmon_unregister(struct fbnic_dev *fbd); int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); +void fbnic_napi_name_irqs(struct fbnic_dev *fbd); +int fbnic_napi_request_irq(struct fbnic_dev *fbd, + struct fbnic_napi_vector *nv); +void fbnic_napi_free_irq(struct fbnic_dev *fbd, + struct fbnic_napi_vector *nv); +void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr); int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler, unsigned long flags, const char *name, void *data); void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c index 2118901b25e9..aeb9f333f4c7 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c @@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p) u32 i, j; *(data++) = start; - *(data++) = end - 1; + *(data++) = end; /* FBNIC_RPC_TCAM_ACT */ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) { diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index cc8ca94529ca..20cd9f5f89e2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -40,49 +40,99 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = { #define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats) #define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN -static int -fbnic_get_ts_info(struct net_device *netdev, - struct kernel_ethtool_ts_info *tsinfo) +static void +fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; - tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp); + fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version, + sizeof(drvinfo->fw_version)); +} - tsinfo->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; +static int fbnic_get_regs_len(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); - tsinfo->tx_types = - BIT(HWTSTAMP_TX_OFF) | - BIT(HWTSTAMP_TX_ON); + return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32); +} - tsinfo->rx_filters = - BIT(HWTSTAMP_FILTER_NONE) | - BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - 
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | - BIT(HWTSTAMP_FILTER_ALL); +static void fbnic_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + struct fbnic_net *fbn = netdev_priv(netdev); - return 0; + fbnic_csr_get_regs(fbn->fbd, data, ®s->version); } -static void -fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig) { - struct fbnic_net *fbn = netdev_priv(netdev); - struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_net *clone; - fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version, - sizeof(drvinfo->fw_version)); + clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL); + if (!clone) + return NULL; + + memset(clone->tx, 0, sizeof(clone->tx)); + memset(clone->rx, 0, sizeof(clone->rx)); + memset(clone->napi, 0, sizeof(clone->napi)); + return clone; } -static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter) +static void fbnic_clone_swap_cfg(struct fbnic_net *orig, + struct fbnic_net *clone) { - if (counter->reported) - *stat = counter->value; + swap(clone->rcq_size, orig->rcq_size); + swap(clone->hpq_size, orig->hpq_size); + swap(clone->ppq_size, orig->ppq_size); + swap(clone->txq_size, orig->txq_size); + swap(clone->num_rx_queues, orig->num_rx_queues); + swap(clone->num_tx_queues, orig->num_tx_queues); + swap(clone->num_napi, orig->num_napi); +} + +static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + int i, j; + + for (i = 0; i < nv->txt_count; i++) { + fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0); + fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1); + fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl); + } + + for (j = 0; j < nv->rxt_count; j++, i++) { + fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0); + fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1); + fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl); + } +} + +static void fbnic_clone_swap(struct fbnic_net *orig, + struct fbnic_net *clone) +{ + struct fbnic_dev *fbd = orig->fbd; + unsigned int i; + + for (i = 0; i < max(clone->num_napi, orig->num_napi); i++) + fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i); + for (i = 0; i < orig->num_napi; i++) + fbnic_aggregate_vector_counters(orig, orig->napi[i]); + + fbnic_clone_swap_cfg(orig, clone); + + for (i = 0; i < ARRAY_SIZE(orig->napi); i++) + swap(clone->napi[i], orig->napi[i]); + for (i = 0; i < ARRAY_SIZE(orig->tx); i++) + swap(clone->tx[i], orig->tx[i]); + for (i = 0; i < ARRAY_SIZE(orig->rx); i++) + swap(clone->rx[i], orig->rx[i]); +} + +static void fbnic_clone_free(struct fbnic_net *clone) +{ + kfree(clone); } static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) @@ -97,6 +147,21 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) } } +static void fbnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fbnic_net *fbn = netdev_priv(dev); + const struct fbnic_stat *stat; + int i; + + fbnic_get_hw_stats(fbn->fbd); + + for (i = 0; i < FBNIC_HW_STATS_LEN; i++) { + stat = &fbnic_gstrings_hw_stats[i]; + data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset); + } +} + static int fbnic_get_sset_count(struct net_device *dev, int sset) { switch (sset) { @@ -107,19 +172,375 @@ static int fbnic_get_sset_count(struct net_device *dev, int sset) } } -static void fbnic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) +static int 
fbnic_get_rss_hash_idx(u32 flow_type) { - struct fbnic_net *fbn = netdev_priv(dev); - const struct fbnic_stat *stat; - int i; + switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case TCP_V4_FLOW: + return FBNIC_TCP4_HASH_OPT; + case TCP_V6_FLOW: + return FBNIC_TCP6_HASH_OPT; + case UDP_V4_FLOW: + return FBNIC_UDP4_HASH_OPT; + case UDP_V6_FLOW: + return FBNIC_UDP6_HASH_OPT; + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_ESP_V4_FLOW: + case SCTP_V4_FLOW: + case IPV4_FLOW: + case IPV4_USER_FLOW: + return FBNIC_IPV4_HASH_OPT; + case AH_V6_FLOW: + case ESP_V6_FLOW: + case AH_ESP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV6_FLOW: + case IPV6_USER_FLOW: + return FBNIC_IPV6_HASH_OPT; + case ETHER_FLOW: + return FBNIC_ETHER_HASH_OPT; + } - fbnic_get_hw_stats(fbn->fbd); + return -1; +} - for (i = 0; i < FBNIC_HW_STATS_LEN; i++) { - stat = &fbnic_gstrings_hw_stats[i]; - data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset); +static int +fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd) +{ + int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type); + + if (hash_opt_idx < 0) + return -EINVAL; + + /* Report options from rss_en table in fbn */ + cmd->data = fbn->rss_flow_hash[hash_opt_idx]; + + return 0; +} + +static int fbnic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = fbn->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = fbnic_get_rss_hash_opts(fbn, cmd); + break; + } + + return ret; +} + +#define FBNIC_L2_HASH_OPTIONS \ + (RXH_L2DA | RXH_DISCARD) +#define FBNIC_L3_HASH_OPTIONS \ + (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST) +#define FBNIC_L4_HASH_OPTIONS \ + (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3) + +static int +fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd) +{ + int hash_opt_idx; + + /* Verify the type requested is correct */ + hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type); + if (hash_opt_idx < 0) + return -EINVAL; + + /* Verify the fields asked for can actually be assigned based on type */ + if (cmd->data & ~FBNIC_L4_HASH_OPTIONS || + (hash_opt_idx > FBNIC_L4_HASH_OPT && + cmd->data & ~FBNIC_L3_HASH_OPTIONS) || + (hash_opt_idx > FBNIC_IP_HASH_OPT && + cmd->data & ~FBNIC_L2_HASH_OPTIONS)) + return -EINVAL; + + fbn->rss_flow_hash[hash_opt_idx] = cmd->data; + + if (netif_running(fbn->netdev)) { + fbnic_rss_reinit(fbn->fbd, fbn); + fbnic_write_rules(fbn->fbd); + } + + return 0; +} + +static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = fbnic_set_rss_hash_opts(fbn, cmd); + break; + } + + return ret; +} + +static u32 fbnic_get_rxfh_key_size(struct net_device *netdev) +{ + return FBNIC_RPC_RSS_KEY_BYTE_LEN; +} + +static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev) +{ + return FBNIC_RPC_RSS_TBL_SIZE; +} + +static int +fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + unsigned int i; + + rxfh->hfunc = ETH_RSS_HASH_TOP; + + if (rxfh->key) { + for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) { + u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8); + + rxfh->key[i] = rss_key >> 24; + } + } + + if (rxfh->indir) { + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) + rxfh->indir[i] = 
fbn->indir_tbl[0][i]; + } + + return 0; +} + +static unsigned int +fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir) +{ + unsigned int i, changes = 0; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + if (fbn->indir_tbl[idx][i] == indir[i]) + continue; + + fbn->indir_tbl[idx][i] = indir[i]; + changes++; + } + + return changes; +} + +static int +fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + unsigned int i, changes = 0; + + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EINVAL; + + if (rxfh->key) { + u32 rss_key = 0; + + for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) { + rss_key >>= 8; + rss_key |= (u32)(rxfh->key[i]) << 24; + + if (i % 4) + continue; + + if (fbn->rss_key[i / 4] == rss_key) + continue; + + fbn->rss_key[i / 4] = rss_key; + changes++; + } } + + if (rxfh->indir) + changes += fbnic_set_indir(fbn, 0, rxfh->indir); + + if (changes && netif_running(netdev)) + fbnic_rss_reinit_hw(fbn->fbd, fbn); + + return 0; +} + +static void fbnic_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + ch->max_rx = fbd->max_num_queues; + ch->max_tx = fbd->max_num_queues; + ch->max_combined = min(ch->max_rx, ch->max_tx); + ch->max_other = FBNIC_NON_NAPI_VECTORS; + + if (fbn->num_rx_queues > fbn->num_napi || + fbn->num_tx_queues > fbn->num_napi) + ch->combined_count = min(fbn->num_rx_queues, + fbn->num_tx_queues); + else + ch->combined_count = + fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi; + ch->rx_count = fbn->num_rx_queues - ch->combined_count; + ch->tx_count = fbn->num_tx_queues - ch->combined_count; + ch->other_count = FBNIC_NON_NAPI_VECTORS; +} + +static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch, + unsigned int max_napis) +{ + fbn->num_rx_queues = ch->rx_count + ch->combined_count; + fbn->num_tx_queues = ch->tx_count + ch->combined_count; + fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count, + max_napis); +} + +static int fbnic_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + unsigned int max_napis, standalone; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_net *clone; + int err; + + max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS; + standalone = ch->rx_count + ch->tx_count; + + /* Limits for standalone queues: + * - each queue has it's own NAPI (num_napi >= rx + tx + combined) + * - combining queues (combined not 0, rx or tx must be 0) + */ + if ((ch->rx_count && ch->tx_count && ch->combined_count) || + (standalone && standalone + ch->combined_count > max_napis) || + ch->rx_count + ch->combined_count > fbd->max_num_queues || + ch->tx_count + ch->combined_count > fbd->max_num_queues || + ch->other_count != FBNIC_NON_NAPI_VECTORS) + return -EINVAL; + + if (!netif_running(netdev)) { + fbnic_set_queues(fbn, ch, max_napis); + fbnic_reset_indir_tbl(fbn); + return 0; + } + + clone = fbnic_clone_create(fbn); + if (!clone) + return -ENOMEM; + + fbnic_set_queues(clone, ch, max_napis); + + err = fbnic_alloc_napi_vectors(clone); + if (err) + goto err_free_clone; + + err = fbnic_alloc_resources(clone); + if (err) + goto err_free_napis; + + fbnic_down_noidle(fbn); + err = fbnic_wait_all_queues_idle(fbn->fbd, true); + if (err) + goto err_start_stack; + + err = 
fbnic_set_netif_queues(clone); + if (err) + goto err_start_stack; + + /* Nothing can fail past this point */ + fbnic_flush(fbn); + + fbnic_clone_swap(fbn, clone); + + /* Reset RSS indirection table */ + fbnic_reset_indir_tbl(fbn); + + fbnic_up(fbn); + + fbnic_free_resources(clone); + fbnic_free_napi_vectors(clone); + fbnic_clone_free(clone); + + return 0; + +err_start_stack: + fbnic_flush(fbn); + fbnic_up(fbn); + fbnic_free_resources(clone); +err_free_napis: + fbnic_free_napi_vectors(clone); +err_free_clone: + fbnic_clone_free(clone); + return err; +} + +static int +fbnic_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *tsinfo) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp); + + tsinfo->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + tsinfo->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + tsinfo->rx_filters = + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static void fbnic_get_ts_stats(struct net_device *netdev, + struct ethtool_ts_stats *ts_stats) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + u64 ts_packets, ts_lost; + struct fbnic_ring *ring; + unsigned int start; + int i; + + ts_stats->pkts = fbn->tx_stats.ts_packets; + ts_stats->lost = fbn->tx_stats.ts_lost; + for (i = 0; i < fbn->num_tx_queues; i++) { + ring = fbn->tx[i]; + do { + start = u64_stats_fetch_begin(&ring->stats.syncp); + ts_packets = ring->stats.ts_packets; + ts_lost = ring->stats.ts_lost; + } while (u64_stats_fetch_retry(&ring->stats.syncp, start)); + ts_stats->pkts += ts_packets; + ts_stats->lost += ts_lost; + } +} + +static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter) +{ + if (counter->reported) + *stat = counter->value; } static void @@ -164,44 +585,6 @@ fbnic_get_eth_mac_stats(struct net_device *netdev, &mac_stats->eth_mac.FrameTooLongErrors); } -static void fbnic_get_ts_stats(struct net_device *netdev, - struct ethtool_ts_stats *ts_stats) -{ - struct fbnic_net *fbn = netdev_priv(netdev); - u64 ts_packets, ts_lost; - struct fbnic_ring *ring; - unsigned int start; - int i; - - ts_stats->pkts = fbn->tx_stats.ts_packets; - ts_stats->lost = fbn->tx_stats.ts_lost; - for (i = 0; i < fbn->num_tx_queues; i++) { - ring = fbn->tx[i]; - do { - start = u64_stats_fetch_begin(&ring->stats.syncp); - ts_packets = ring->stats.ts_packets; - ts_lost = ring->stats.ts_lost; - } while (u64_stats_fetch_retry(&ring->stats.syncp, start)); - ts_stats->pkts += ts_packets; - ts_stats->lost += ts_lost; - } -} - -static void fbnic_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *data) -{ - struct fbnic_net *fbn = netdev_priv(netdev); - - fbnic_csr_get_regs(fbn->fbd, data, ®s->version); -} - -static int fbnic_get_regs_len(struct net_device *netdev) -{ - struct fbnic_net *fbn = netdev_priv(netdev); - - return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32); -} - static const struct ethtool_ops fbnic_ethtool_ops = { .get_drvinfo = fbnic_get_drvinfo, .get_regs_len = fbnic_get_regs_len, @@ -209,6 +592,14 @@ static const struct ethtool_ops fbnic_ethtool_ops = { .get_strings = fbnic_get_strings, .get_ethtool_stats = fbnic_get_ethtool_stats, .get_sset_count = fbnic_get_sset_count, + .get_rxnfc = fbnic_get_rxnfc, + 
.set_rxnfc = fbnic_set_rxnfc, + .get_rxfh_key_size = fbnic_get_rxfh_key_size, + .get_rxfh_indir_size = fbnic_get_rxfh_indir_size, + .get_rxfh = fbnic_get_rxfh, + .set_rxfh = fbnic_set_rxfh, + .get_channels = fbnic_get_channels, + .set_channels = fbnic_set_channels, .get_ts_info = fbnic_get_ts_info, .get_ts_stats = fbnic_get_ts_stats, .get_eth_mac_stats = fbnic_get_eth_mac_stats, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 8f7a2a19ddf8..bbc7c1c0c37e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -228,6 +228,63 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) tx_mbx->head = head; } +static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd, + struct fbnic_tlv_msg *msg, + struct fbnic_fw_completion *cmpl_data) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + + /* If we are already waiting on a completion then abort */ + if (cmpl_data && fbd->cmpl_data) { + err = -EBUSY; + goto unlock_mbx; + } + + /* Record completion location and submit request */ + if (cmpl_data) + fbd->cmpl_data = cmpl_data; + + err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg, + le16_to_cpu(msg->hdr.len) * sizeof(u32), 1); + + /* If msg failed then clear completion data for next caller */ + if (err && cmpl_data) + fbd->cmpl_data = NULL; + +unlock_mbx: + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); + + return err; +} + +static void fbnic_fw_release_cmpl_data(struct kref *kref) +{ + struct fbnic_fw_completion *cmpl_data; + + cmpl_data = container_of(kref, struct fbnic_fw_completion, + ref_count); + kfree(cmpl_data); +} + +static struct fbnic_fw_completion * +fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type) +{ + struct fbnic_fw_completion *cmpl_data = NULL; + unsigned long flags; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) { + cmpl_data = fbd->cmpl_data; + kref_get(&fbd->cmpl_data->ref_count); + } + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); + + return cmpl_data; +} + /** * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data * @fbd: FBNIC device structure @@ -651,6 +708,84 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd) dev_warn(fbd->dev, "Failed to send heartbeat message\n"); } +/** + * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request + * @fbd: FBNIC device structure + * @cmpl_data: Completion data structure to store sensor response + * + * Asks the firmware to provide an update with the latest sensor data. + * The response will contain temperature and voltage readings. 
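The request above is completion based: the caller allocates a struct fbnic_fw_completion, initializes it for the expected response type, transmits the request, then sleeps on the embedded completion, which the mailbox parser signals once it has filled in the result. A minimal caller-side sketch follows; example_read_temp() is a hypothetical helper used only for illustration, and the full version of this flow, including resend retries on timeout, is fbnic_mac_get_sensor_asic() in the fbnic_mac.c hunk below.

static int example_read_temp(struct fbnic_dev *fbd, long *millidegrees)
{
        struct fbnic_fw_completion *cmpl;
        int err;

        cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
        if (!cmpl)
                return -ENOMEM;

        fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);

        err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);
        if (err)
                goto put_cmpl;

        /* fbnic_fw_parse_tsene_read_resp() fills the result and completes done */
        if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ)) {
                err = -ETIMEDOUT;
                goto clear_cmpl;
        }

        err = cmpl->result;
        if (!err)
                *millidegrees = cmpl->u.tsene.millidegrees;

clear_cmpl:
        fbnic_fw_clear_compl(fbd);
put_cmpl:
        fbnic_fw_put_cmpl(cmpl);
        return err;
}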
+ * + * Return: 0 on success, negative error value on failure + */ +int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + struct fbnic_tlv_msg *msg; + int err; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ); + if (!msg) + return -ENOMEM; + + err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data); + if (err) + goto free_message; + + return 0; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = { + FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM), + FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT), + FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_tsene_read_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + int err = 0; + + /* Verify we have a completion pointer to provide with data */ + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, + FBNIC_TLV_MSG_ID_TSENE_READ_RESP); + if (!cmpl_data) + return -EINVAL; + + if (results[FBNIC_TSENE_ERROR]) { + err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]); + if (err) + goto exit_complete; + } + + if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) { + err = -EINVAL; + goto exit_complete; + } + + cmpl_data->u.tsene.millidegrees = + fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]); + cmpl_data->u.tsene.millivolts = + fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]); + +exit_complete: + cmpl_data->result = err; + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return err; +} + static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index, fbnic_fw_parse_cap_resp), @@ -658,6 +793,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { fbnic_fw_parse_ownership_resp), FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index, fbnic_fw_parse_heartbeat_resp), + FBNIC_TLV_PARSER(TSENE_READ_RESP, + fbnic_tsene_read_resp_index, + fbnic_fw_parse_tsene_read_resp), FBNIC_TLV_MSG_ERROR }; @@ -802,3 +940,25 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit, fw_version, str_sz); } + +void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl, + u32 msg_type) +{ + fw_cmpl->msg_type = msg_type; + init_completion(&fw_cmpl->done); + kref_init(&fw_cmpl->ref_count); +} + +void fbnic_fw_clear_compl(struct fbnic_dev *fbd) +{ + unsigned long flags; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + fbd->cmpl_data = NULL; + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); +} + +void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl) +{ + kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index 7cd8841920e4..fe68333d51b1 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -45,10 +45,16 @@ struct fbnic_fw_cap { }; struct fbnic_fw_completion { - struct { - s32 millivolts; - s32 millidegrees; - } tsene; + u32 msg_type; + struct completion done; + struct kref ref_count; + int result; + union { + struct { + s32 millivolts; + s32 millidegrees; + } tsene; + } u; }; void fbnic_mbx_init(struct fbnic_dev *fbd); @@ -59,6 +65,12 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd); int fbnic_fw_xmit_ownership_msg(struct fbnic_dev 
*fbd, bool take_ownership); int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); +int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data); +void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data, + u32 msg_type); +void fbnic_fw_clear_compl(struct fbnic_dev *fbd); +void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data); #define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \ do { \ @@ -83,6 +95,8 @@ enum { FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13, FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14, FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15, + FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C, + FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D, }; #define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24) @@ -125,6 +139,13 @@ enum { }; enum { + FBNIC_TSENE_THERM = 0x0, + FBNIC_TSENE_VOLT = 0x1, + FBNIC_TSENE_ERROR = 0x2, + FBNIC_TSENE_MSG_MAX +}; + +enum { FBNIC_FW_OWNERSHIP_FLAG = 0x0, FBNIC_FW_OWNERSHIP_MSG_MAX }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c index bcd1086e3768..def8598aceec 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c @@ -66,7 +66,7 @@ void fbnic_hwmon_register(struct fbnic_dev *fbd) if (IS_ERR(fbd->hwmon)) { dev_notice(fbd->dev, "Failed to register hwmon device %pe\n", - fbd->hwmon); + fbd->hwmon); fbd->hwmon = NULL; } } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c index 914362195920..1bbc0e56f3a0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -146,6 +146,17 @@ void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) free_irq(fbd->pcs_msix_vector, fbd); } +void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int irq = pci_irq_vector(pdev, nr); + + if (irq < 0) + return; + + synchronize_irq(irq); +} + int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler, unsigned long flags, const char *name, void *data) { @@ -169,6 +180,48 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data) free_irq(irq, data); } +void fbnic_napi_name_irqs(struct fbnic_dev *fbd) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++) + snprintf(fbd->napi_irq[i].name, + sizeof(fbd->napi_irq[i].name), + "%s-TxRx-%u", fbd->netdev->name, i); +} + +int fbnic_napi_request_irq(struct fbnic_dev *fbd, + struct fbnic_napi_vector *nv) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + int i = fbnic_napi_idx(nv); + int err; + + if (!fbd->napi_irq[i].users) { + err = fbnic_request_irq(fbd, nv->v_idx, + fbnic_msix_clean_rings, 0, + fbd->napi_irq[i].name, + &fbn->napi[i]); + if (err) + return err; + } + + fbd->napi_irq[i].users++; + return 0; +} + +void fbnic_napi_free_irq(struct fbnic_dev *fbd, + struct fbnic_napi_vector *nv) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + int i = fbnic_napi_idx(nv); + + if (--fbd->napi_irq[i].users) + return; + + fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]); +} + void fbnic_free_irqs(struct fbnic_dev *fbd) { struct pci_dev *pdev = to_pci_dev(fbd->dev); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 80b82ff12c4d..14291401f463 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -686,25 +686,75 @@ 
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset, MAC_STAT_TX_BROADCAST); } -static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val) +static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, + long *val) { - struct fbnic_fw_completion fw_cmpl; + struct fbnic_fw_completion *fw_cmpl; + int err = 0, retries = 5; s32 *sensor; + fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL); + if (!fw_cmpl) + return -ENOMEM; + + /* Initialize completion and queue it for FW to process */ + fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP); + switch (id) { case FBNIC_SENSOR_TEMP: - sensor = &fw_cmpl.tsene.millidegrees; + sensor = &fw_cmpl->u.tsene.millidegrees; break; case FBNIC_SENSOR_VOLTAGE: - sensor = &fw_cmpl.tsene.millivolts; + sensor = &fw_cmpl->u.tsene.millivolts; break; default: - return -EINVAL; + err = -EINVAL; + goto exit_free; + } + + err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl); + if (err) { + dev_err(fbd->dev, + "Failed to transmit TSENE read msg, err %d\n", + err); + goto exit_free; + } + + /* Allow 2 seconds for reply, resend and try up to 5 times */ + while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) { + retries--; + + if (retries == 0) { + dev_err(fbd->dev, + "Timed out waiting for TSENE read\n"); + err = -ETIMEDOUT; + goto exit_cleanup; + } + + err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL); + if (err) { + dev_err(fbd->dev, + "Failed to transmit TSENE read msg, err %d\n", + err); + goto exit_cleanup; + } + } + + /* Handle error returned by firmware */ + if (fw_cmpl->result) { + err = fw_cmpl->result; + dev_err(fbd->dev, "%s: Firmware returned error %d\n", + __func__, err); + goto exit_cleanup; } *val = *sensor; +exit_cleanup: + fbnic_fw_clear_compl(fbd); +exit_free: + fbnic_fw_put_cmpl(fw_cmpl); - return 0; + return err; } static const struct fbnic_mac fbnic_mac_asic = { diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index fc7d80db5fa6..7a96b6ee773f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn) if (err) goto free_napi_vectors; - err = netif_set_real_num_tx_queues(fbn->netdev, - fbn->num_tx_queues); - if (err) - goto free_resources; - - err = netif_set_real_num_rx_queues(fbn->netdev, - fbn->num_rx_queues); + err = fbnic_set_netif_queues(fbn); if (err) goto free_resources; @@ -74,6 +68,8 @@ static int fbnic_open(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); int err; + fbnic_napi_name_irqs(fbn->fbd); + err = __fbnic_open(fbn); if (!err) fbnic_up(fbn); @@ -91,6 +87,7 @@ static int fbnic_stop(struct net_device *netdev) fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); + fbnic_reset_netif_queues(fbn); fbnic_free_resources(fbn); fbnic_free_napi_vectors(fbn); @@ -615,7 +612,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) fbn->netdev = netdev; fbn->fbd = fbd; - INIT_LIST_HEAD(&fbn->napis); fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT; fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index b8417b300778..a392ac1cc4f2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -11,10 +11,14 @@ #include "fbnic_rpc.h" #include "fbnic_txrx.h" +#define FBNIC_MAX_NAPI_VECTORS 128u + struct fbnic_net { struct fbnic_ring *tx[FBNIC_MAX_TXQS]; 
struct fbnic_ring *rx[FBNIC_MAX_RXQS]; + struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS]; + struct net_device *netdev; struct fbnic_dev *fbd; @@ -56,13 +60,12 @@ struct fbnic_net { /* Time stampinn filter config */ struct kernel_hwtstamp_config hwtstamp_config; - - struct list_head napis; }; int __fbnic_open(struct fbnic_net *fbn); void fbnic_up(struct fbnic_net *fbn); void fbnic_down(struct fbnic_net *fbn); +void fbnic_down_noidle(struct fbnic_net *fbn); struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd); void fbnic_netdev_free(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index 32702dc4a066..6cbbc2ee3e1f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -145,7 +145,7 @@ void fbnic_up(struct fbnic_net *fbn) fbnic_service_task_start(fbn); } -static void fbnic_down_noidle(struct fbnic_net *fbn) +void fbnic_down_noidle(struct fbnic_net *fbn) { fbnic_service_task_stop(fbn); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c index 1a5e1e719b30..bb11fc83367d 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c @@ -15,7 +15,7 @@ fbnic_pcs_to_net(struct phylink_pcs *pcs) } static void -fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, +fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c index 908c098cd59e..c25bd300b902 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -13,10 +13,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn) unsigned int num_rx = fbn->num_rx_queues; unsigned int i; - for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + if (netif_is_rxfh_configured(fbn->netdev)) + return; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx); - fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx); - } } void fbnic_rss_key_fill(u32 *buffer) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index b5050fabe8fe..d4d7027df9a0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -1033,20 +1033,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) fbnic_nv_irq_rearm(nv); - return 0; + return work_done; } -static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data) +irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data) { - struct fbnic_napi_vector *nv = data; + struct fbnic_napi_vector *nv = *(void **)data; napi_schedule_irqoff(&nv->napi); return IRQ_HANDLED; } -static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, - struct fbnic_ring *rxr) +void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, + struct fbnic_ring *rxr) { struct fbnic_queue_stats *stats = &rxr->stats; @@ -1056,8 +1056,8 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, fbn->rx_stats.dropped += stats->dropped; } -static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, - struct fbnic_ring *txr) +void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, + struct 
fbnic_ring *txr) { struct fbnic_queue_stats *stats = &txr->stats; @@ -1099,7 +1099,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn, struct fbnic_napi_vector *nv) { struct fbnic_dev *fbd = nv->fbd; - u32 v_idx = nv->v_idx; int i, j; for (i = 0; i < nv->txt_count; i++) { @@ -1113,31 +1112,20 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn, fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); } - fbnic_free_irq(fbd, v_idx, nv); + fbnic_napi_free_irq(fbd, nv); page_pool_destroy(nv->page_pool); netif_napi_del(&nv->napi); - list_del(&nv->napis); + fbn->napi[fbnic_napi_idx(nv)] = NULL; kfree(nv); } void fbnic_free_napi_vectors(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv, *temp; - - list_for_each_entry_safe(nv, temp, &fbn->napis, napis) - fbnic_free_napi_vector(fbn, nv); -} - -static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv) -{ - unsigned char *dev_name = nv->napi.dev->name; + int i; - if (!nv->rxt_count) - snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name, - nv->v_idx - FBNIC_NON_NAPI_VECTORS); - else - snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name, - nv->v_idx - FBNIC_NON_NAPI_VECTORS); + for (i = 0; i < fbn->num_napi; i++) + if (fbn->napi[i]) + fbnic_free_napi_vector(fbn, fbn->napi[i]); } #define FBNIC_PAGE_POOL_FLAGS \ @@ -1222,7 +1210,7 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, nv->v_idx = v_idx; /* Tie napi to netdev */ - list_add(&nv->napis, &fbn->napis); + fbn->napi[fbnic_napi_idx(nv)] = nv; netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); /* Record IRQ to NAPI struct */ @@ -1239,12 +1227,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, goto napi_del; } - /* Initialize vector name */ - fbnic_name_napi_vector(nv); - /* Request the IRQ for napi vector */ - err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings, - IRQF_SHARED, nv->name, nv); + err = fbnic_napi_request_irq(fbd, nv); if (err) goto pp_destroy; @@ -1307,7 +1291,7 @@ pp_destroy: page_pool_destroy(nv->page_pool); napi_del: netif_napi_del(&nv->napi); - list_del(&nv->napis); + fbn->napi[fbnic_napi_idx(nv)] = NULL; kfree(nv); return err; } @@ -1612,19 +1596,18 @@ free_resources: void fbnic_free_resources(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv; + int i; - list_for_each_entry(nv, &fbn->napis, napis) - fbnic_free_nv_resources(fbn, nv); + for (i = 0; i < fbn->num_napi; i++) + fbnic_free_nv_resources(fbn, fbn->napi[i]); } int fbnic_alloc_resources(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv; - int err = -ENODEV; + int i, err = -ENODEV; - list_for_each_entry(nv, &fbn->napis, napis) { - err = fbnic_alloc_nv_resources(fbn, nv); + for (i = 0; i < fbn->num_napi; i++) { + err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]); if (err) goto free_resources; } @@ -1632,12 +1615,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn) return 0; free_resources: - list_for_each_entry_continue_reverse(nv, &fbn->napis, napis) - fbnic_free_nv_resources(fbn, nv); + while (i--) + fbnic_free_nv_resources(fbn, fbn->napi[i]); return err; } +static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv) +{ + int i, j; + + /* Associate Tx queue with NAPI */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, &nv->napi); + } + + /* Associate Rx queue with NAPI */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + 
netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, &nv->napi); + } +} + +static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv) +{ + int i, j; + + /* Disassociate Tx queue from NAPI */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, NULL); + } + + /* Disassociate Rx queue from NAPI */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, NULL); + } +} + +int fbnic_set_netif_queues(struct fbnic_net *fbn) +{ + int i, err; + + err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues, + fbn->num_rx_queues); + if (err) + return err; + + for (i = 0; i < fbn->num_napi; i++) + fbnic_set_netif_napi(fbn->napi[i]); + + return 0; +} + +void fbnic_reset_netif_queues(struct fbnic_net *fbn) +{ + int i; + + for (i = 0; i < fbn->num_napi; i++) + fbnic_reset_netif_napi(fbn->napi[i]); +} + static void fbnic_disable_twq0(struct fbnic_ring *txr) { u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL); @@ -1670,33 +1718,34 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr) void fbnic_napi_disable(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv; + int i; - list_for_each_entry(nv, &fbn->napis, napis) { - napi_disable(&nv->napi); + for (i = 0; i < fbn->num_napi; i++) { + napi_disable(&fbn->napi[i]->napi); - fbnic_nv_irq_disable(nv); + fbnic_nv_irq_disable(fbn->napi[i]); } } void fbnic_disable(struct fbnic_net *fbn) { struct fbnic_dev *fbd = fbn->fbd; - struct fbnic_napi_vector *nv; - int i, j; + int i, j, t; + + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; - list_for_each_entry(nv, &fbn->napis, napis) { /* Disable Tx queue triads */ - for (i = 0; i < nv->txt_count; i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (t = 0; t < nv->txt_count; t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; fbnic_disable_twq0(&qt->sub0); fbnic_disable_tcq(&qt->cmpl); } /* Disable Rx queue triads */ - for (j = 0; j < nv->rxt_count; j++, i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (j = 0; j < nv->rxt_count; j++, t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; fbnic_disable_bdq(&qt->sub0, &qt->sub1); fbnic_disable_rcq(&qt->cmpl); @@ -1792,14 +1841,15 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail) void fbnic_flush(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv; + int i; - list_for_each_entry(nv, &fbn->napis, napis) { - int i, j; + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + int j, t; /* Flush any processed Tx Queue Triads and drop the rest */ - for (i = 0; i < nv->txt_count; i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (t = 0; t < nv->txt_count; t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; struct netdev_queue *tx_queue; /* Clean the work queues of unprocessed work */ @@ -1816,15 +1866,11 @@ void fbnic_flush(struct fbnic_net *fbn) tx_queue = netdev_get_tx_queue(nv->napi.dev, qt->sub0.q_idx); netdev_tx_reset_queue(tx_queue); - - /* Disassociate Tx queue from NAPI */ - netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, - NETDEV_QUEUE_TYPE_TX, NULL); } /* Flush any processed Rx Queue Triads and drop the rest */ - for (j = 0; j < nv->rxt_count; j++, i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (j = 0; j < nv->rxt_count; j++, t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; /* Clean the work queues of unprocessed work 
*/ fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); @@ -1835,43 +1881,23 @@ void fbnic_flush(struct fbnic_net *fbn) fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); qt->cmpl.pkt->buff.data_hard_start = NULL; - - /* Disassociate Rx queue from NAPI */ - netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, - NETDEV_QUEUE_TYPE_RX, NULL); } } } void fbnic_fill(struct fbnic_net *fbn) { - struct fbnic_napi_vector *nv; - - list_for_each_entry(nv, &fbn->napis, napis) { - int i, j; - - /* Configure NAPI mapping for Tx */ - for (i = 0; i < nv->txt_count; i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; - - /* Nothing to do if Tx queue is disabled */ - if (qt->sub0.flags & FBNIC_RING_F_DISABLED) - continue; + int i; - /* Associate Tx queue with NAPI */ - netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, - NETDEV_QUEUE_TYPE_TX, &nv->napi); - } + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + int j, t; /* Configure NAPI mapping and populate pages * in the BDQ rings to use for Rx */ - for (j = 0; j < nv->rxt_count; j++, i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; - - /* Associate Rx queue with NAPI */ - netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, - NETDEV_QUEUE_TYPE_RX, &nv->napi); + for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; /* Populate the header and payload BDQs */ fbnic_fill_bdq(nv, &qt->sub0); @@ -2025,21 +2051,23 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv, void fbnic_enable(struct fbnic_net *fbn) { struct fbnic_dev *fbd = fbn->fbd; - struct fbnic_napi_vector *nv; - int i, j; + int i; + + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + int j, t; - list_for_each_entry(nv, &fbn->napis, napis) { /* Setup Tx Queue Triads */ - for (i = 0; i < nv->txt_count; i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (t = 0; t < nv->txt_count; t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; fbnic_enable_twq0(&qt->sub0); fbnic_enable_tcq(nv, &qt->cmpl); } /* Setup Rx Queue Triads */ - for (j = 0; j < nv->rxt_count; j++, i++) { - struct fbnic_q_triad *qt = &nv->qt[i]; + for (j = 0; j < nv->rxt_count; j++, t++) { + struct fbnic_q_triad *qt = &nv->qt[t]; fbnic_enable_bdq(&qt->sub0, &qt->sub1); fbnic_config_drop_mode_rcq(nv, &qt->cmpl); @@ -2064,10 +2092,11 @@ void fbnic_napi_enable(struct fbnic_net *fbn) { u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; struct fbnic_dev *fbd = fbn->fbd; - struct fbnic_napi_vector *nv; int i; - list_for_each_entry(nv, &fbn->napis, napis) { + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + napi_enable(&nv->napi); fbnic_nv_irq_enable(nv); @@ -2096,17 +2125,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; struct fbnic_dev *fbd = fbn->fbd; - struct fbnic_napi_vector *nv; - int i, j; + int i, j, t; + + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; - list_for_each_entry(nv, &fbn->napis, napis) { /* Find RQs which are completely out of pages */ - for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) { + for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) { /* Assume 4 pages is always enough to fit a packet * and therefore generate a completion and an IRQ. 
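All of these per-vector loops rely on the same layout of the queue triad array: Tx triads occupy qt[0] through qt[txt_count - 1] and Rx triads follow immediately after them, which is why the Rx loops now start their index at txt_count rather than reusing the Tx loop counter. As a purely illustrative helper (not part of the patch), the n-th Rx triad of a vector could be looked up like this:

static struct fbnic_q_triad *example_rx_triad(struct fbnic_napi_vector *nv,
                                              int rx_idx)
{
        /* Rx triads are stored after the txt_count Tx triads in qt[] */
        return &nv->qt[nv->txt_count + rx_idx];
}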
*/ - if (fbnic_desc_used(&nv->qt[i].sub0) < 4 || - fbnic_desc_used(&nv->qt[i].sub1) < 4) + if (fbnic_desc_used(&nv->qt[t].sub0) < 4 || + fbnic_desc_used(&nv->qt[t].sub1) < 4) irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); } } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index 8d626287c3f4..c2a94f31f71b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -104,14 +104,11 @@ struct fbnic_napi_vector { struct device *dev; /* Device for DMA unmapping */ struct page_pool *page_pool; struct fbnic_dev *fbd; - char name[IFNAMSIZ + 9]; u16 v_idx; u8 txt_count; u8 rxt_count; - struct list_head napis; - struct fbnic_q_triad qt[]; }; @@ -123,10 +120,18 @@ netdev_features_t fbnic_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); +void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, + struct fbnic_ring *rxr); +void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, + struct fbnic_ring *txr); + int fbnic_alloc_napi_vectors(struct fbnic_net *fbn); void fbnic_free_napi_vectors(struct fbnic_net *fbn); int fbnic_alloc_resources(struct fbnic_net *fbn); void fbnic_free_resources(struct fbnic_net *fbn); +int fbnic_set_netif_queues(struct fbnic_net *fbn); +void fbnic_reset_netif_queues(struct fbnic_net *fbn); +irqreturn_t fbnic_msix_clean_rings(int irq, void *data); void fbnic_napi_enable(struct fbnic_net *fbn); void fbnic_napi_disable(struct fbnic_net *fbn); void fbnic_enable(struct fbnic_net *fbn); @@ -137,4 +142,9 @@ void fbnic_fill(struct fbnic_net *fbn); void fbnic_napi_depletion_check(struct net_device *netdev); int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail); +static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv) +{ + return nv->v_idx - FBNIC_NON_NAPI_VECTORS; +} + #endif /* _FBNIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 1a1cbd034eda..1459acfb1e61 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1055,9 +1055,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, { struct lan743x_adapter *adapter = netdev_priv(netdev); - eee->tx_lpi_timer = lan743x_csr_read(adapter, - MAC_EEE_TX_LPI_REQ_DLY_CNT); - return phylink_ethtool_get_eee(adapter->phylink, eee); } @@ -1065,24 +1062,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev, struct ethtool_keee *eee) { struct lan743x_adapter *adapter = netdev_priv(netdev); - u32 tx_lpi_timer; - - tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); - if (tx_lpi_timer != eee->tx_lpi_timer) { - u32 mac_cr = lan743x_csr_read(adapter, MAC_CR); - - /* Software should only change this field when Energy Efficient - * Ethernet Enable (EEEEN) is cleared. - * This function will trigger an autonegotiation restart and - * eee will be reenabled during link up if eee was negotiated. 
- */ - lan743x_mac_eee_enable(adapter, false); - lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, - eee->tx_lpi_timer); - - if (mac_cr & MAC_CR_EEE_EN_) - lan743x_mac_eee_enable(adapter, true); - } return phylink_ethtool_set_eee(adapter->phylink, eee); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4dc5adcda6a3..23760b613d3e 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2966,7 +2966,7 @@ static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter) return lan743x_pcs_power_reset(adapter); } -void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable) +static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable) { u32 mac_cr; @@ -3027,10 +3027,8 @@ static void lan743x_phylink_mac_link_down(struct phylink_config *config, phy_interface_t interface) { struct net_device *netdev = to_net_dev(config->dev); - struct lan743x_adapter *adapter = netdev_priv(netdev); - netif_tx_stop_all_queues(to_net_dev(config->dev)); - lan743x_mac_eee_enable(adapter, false); + netif_tx_stop_all_queues(netdev); } static void lan743x_phylink_mac_link_up(struct phylink_config *config, @@ -3072,16 +3070,40 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config, cap & FLOW_CTRL_TX, cap & FLOW_CTRL_RX); - if (phydev) - lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi); - netif_tx_wake_all_queues(netdev); } +static void lan743x_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_mac_eee_enable(adapter, false); +} + +static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + /* Software should only change this field when Energy Efficient + * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing + * EEEEN during probe, and phylink itself guarantees that + * mac_disable_tx_lpi() will have been previously called. 
+ */ + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer); + lan743x_mac_eee_enable(adapter, true); + + return 0; +} + static const struct phylink_mac_ops lan743x_phylink_mac_ops = { .mac_config = lan743x_phylink_mac_config, .mac_link_down = lan743x_phylink_mac_link_down, .mac_link_up = lan743x_phylink_mac_link_up, + .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi, + .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi, }; static int lan743x_phylink_create(struct lan743x_adapter *adapter) @@ -3095,6 +3117,9 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter) adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD; + adapter->phylink_config.lpi_timer_default = + lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); lan743x_phy_interface_select(adapter); @@ -3120,6 +3145,10 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter) phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces); } + memcpy(adapter->phylink_config.lpi_interfaces, + adapter->phylink_config.supported_interfaces, + sizeof(adapter->phylink_config.lpi_interfaces)); + pl = phylink_create(&adapter->phylink_config, NULL, adapter->phy_interface, &lan743x_phylink_mac_ops); @@ -3517,6 +3546,9 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&tx->ring_lock); } + /* Ensure EEEEN is clear */ + lan743x_mac_eee_enable(adapter, false); + return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 8ef897c114d3..7f73d66854be 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -1206,6 +1206,5 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter); void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, bool tx_enable, bool rx_enable); int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr); -void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable); #endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 25cb2f61986f..1efa584e7107 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -443,7 +443,7 @@ int lan966x_stats_init(struct lan966x *lan966x); void lan966x_port_config_down(struct lan966x_port *port); void lan966x_port_config_up(struct lan966x_port *port); -void lan966x_port_status_get(struct lan966x_port *port, +void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode, struct phylink_link_state *state); int lan966x_port_pcs_set(struct lan966x_port *port, struct lan966x_port_config *config); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c index 1d63903f9006..75188b99e4e7 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c @@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs) } static void lan966x_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct lan966x_port *port = lan966x_pcs_to_port(pcs); - lan966x_port_status_get(port, state); + lan966x_port_status_get(port, neg_mode, state); } static int 
lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index fdfa4040d9ee..cf7de0267c32 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port) lan966x_port_link_up(port); } -void lan966x_port_status_get(struct lan966x_port *port, +void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode, struct phylink_link_state *state) { struct lan966x *lan966x = port->lan966x; @@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port, bmsr |= BMSR_ANEGCOMPLETE; lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val); - phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv); + phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv); } else { if (!state->link) return; diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index 35b057c9d0cb..35e1c0cf345e 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -28,5 +28,6 @@ config SPARX5_DCB config LAN969X_SWITCH bool "Lan969x switch driver" depends on SPARX5_SWITCH + select PAGE_POOL help This driver supports the lan969x family of network switch devices. diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index 4bf2a885a9da..d447f9e84d92 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -20,7 +20,9 @@ sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \ lan969x/lan969x.o \ lan969x/lan969x_calendar.o \ lan969x/lan969x_vcap_ag_api.o \ - lan969x/lan969x_vcap_impl.o + lan969x/lan969x_vcap_impl.o \ + lan969x/lan969x_rgmii.o \ + lan969x/lan969x_fdma.o # Provide include files ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c index c2afa2176b08..f3a9c71bea36 100644 --- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c @@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = { { TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */ { TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */ { TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */ + { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */ + { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */ { TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */ { TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */ { TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */ + { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */ }; static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = { @@ -329,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = { .is_port_5g = &lan969x_port_is_5g, .is_port_10g = &lan969x_port_is_10g, .is_port_25g = &lan969x_port_is_25g, + .is_port_rgmii = &lan969x_port_is_rgmii, .get_port_dev_index = &lan969x_port_dev_mapping, .get_port_dev_bit = &lan969x_get_dev_mode_bit, .get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate, @@ -336,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = { .set_port_mux = &lan969x_port_mux_set, .ptp_irq_handler = &lan969x_ptp_irq_handler, .dsm_calendar_calc = 
&lan969x_dsm_calendar_calc, + .port_config_rgmii = &lan969x_port_config_rgmii, + .fdma_init = &lan969x_fdma_init, + .fdma_deinit = &lan969x_fdma_deinit, + .fdma_poll = &lan969x_fdma_napi_poll, + .fdma_xmit = &lan969x_fdma_xmit, }; const struct sparx5_match_data lan969x_desc = { diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h index 2489d0d32dfd..529fde3d4deb 100644 --- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h @@ -59,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno) return false; } +static inline bool lan969x_port_is_rgmii(int portno) +{ + return portno == 28 || portno == 29; +} + /* lan969x_calendar.c */ int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi, struct sparx5_calendar_data *data); + +/* lan969x_rgmii.c */ +int lan969x_port_config_rgmii(struct sparx5_port *port, + struct sparx5_port_config *conf); + +/* lan969x_fdma.c */ +int lan969x_fdma_init(struct sparx5 *sparx5); +int lan969x_fdma_deinit(struct sparx5 *sparx5); +int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight); +int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb, + struct net_device *dev); + #endif diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c new file mode 100644 index 000000000000..1282f5c3ee6d --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip lan969x Switch driver + * + * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries. + */ +#include <net/page_pool/helpers.h> + +#include "../sparx5_main.h" +#include "../sparx5_main_regs.h" +#include "../sparx5_port.h" + +#include "fdma_api.h" +#include "lan969x.h" + +#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv)) + +static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) +{ + *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr; + + return 0; +} + +static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) +{ + struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx; + struct page *page; + + page = page_pool_dev_alloc_pages(rx->page_pool); + if (unlikely(!page)) + return -ENOMEM; + + rx->page[dcb][db] = page; + + *dataptr = page_pool_get_dma_addr(page); + + return 0; +} + +static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx) +{ + struct fdma *fdma = &tx->fdma; + + for (int i = 0; i < fdma->n_dcbs; ++i) + if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i])) + return i; + + return -ENOSPC; +} + +static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight) +{ + struct fdma *fdma = &sparx5->tx.fdma; + struct sparx5_tx_buf *db; + unsigned long flags; + int i; + + spin_lock_irqsave(&sparx5->tx_lock, flags); + + for (i = 0; i < fdma->n_dcbs; ++i) { + db = &sparx5->tx.dbs[i]; + + if (!db->used) + continue; + + if (!fdma_db_is_done(fdma_db_get(fdma, i, 0))) + continue; + + db->dev->stats.tx_bytes += db->skb->len; + db->dev->stats.tx_packets++; + sparx5->tx.packets++; + + dma_unmap_single(sparx5->dev, + db->dma_addr, + db->skb->len, + DMA_TO_DEVICE); + + if (!db->ptp) + napi_consume_skb(db->skb, weight); + + db->used = false; + } + + spin_unlock_irqrestore(&sparx5->tx_lock, flags); +} + +static void lan969x_fdma_free_pages(struct sparx5_rx *rx) +{ + struct fdma *fdma = &rx->fdma; + + for (int 
i = 0; i < fdma->n_dcbs; ++i) { + for (int j = 0; j < fdma->n_dbs; ++j) + page_pool_put_full_page(rx->page_pool, + rx->page[i][j], false); + } +} + +static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5, + struct sparx5_rx *rx) +{ + const struct sparx5_consts *consts = sparx5->data->consts; + struct fdma *fdma = &rx->fdma; + struct sparx5_port *port; + struct frame_info fi; + struct sk_buff *skb; + struct fdma_db *db; + struct page *page; + + db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index]; + page = rx->page[fdma->dcb_index][fdma->db_index]; + + sparx5_ifh_parse(sparx5, page_address(page), &fi); + port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] : + NULL; + if (WARN_ON(!port)) + goto free_page; + + skb = build_skb(page_address(page), fdma->db_size); + if (unlikely(!skb)) + goto free_page; + + skb_mark_for_recycle(skb); + skb_put(skb, fdma_db_len_get(db)); + skb_pull(skb, IFH_LEN * sizeof(u32)); + + skb->dev = port->ndev; + + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + return skb; + +free_page: + page_pool_recycle_direct(rx->page_pool, page); + + return NULL; +} + +static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct fdma *fdma = &rx->fdma; + int err; + + struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = fdma->n_dcbs * fdma->n_dbs, + .nid = NUMA_NO_NODE, + .dev = sparx5->dev, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = fdma->db_size - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + }; + + rx->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx->page_pool)) + return PTR_ERR(rx->page_pool); + + err = fdma_alloc_coherent(sparx5->dev, fdma); + if (err) + return err; + + fdma_dcbs_init(fdma, + FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); + + return 0; +} + +static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_tx *tx = &sparx5->tx; + struct fdma *fdma = &tx->fdma; + int err; + + tx->dbs = kcalloc(fdma->n_dcbs, + sizeof(struct sparx5_tx_buf), + GFP_KERNEL); + if (!tx->dbs) + return -ENOMEM; + + err = fdma_alloc_coherent(sparx5->dev, fdma); + if (err) { + kfree(tx->dbs); + return err; + } + + fdma_dcbs_init(fdma, + FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_DONE); + + return 0; +} + +static void lan969x_fdma_rx_init(struct sparx5 *sparx5) +{ + struct fdma *fdma = &sparx5->rx.fdma; + + fdma->channel_id = FDMA_XTR_CHANNEL; + fdma->n_dcbs = FDMA_DCB_MAX; + fdma->n_dbs = 1; + fdma->priv = sparx5; + fdma->size = fdma_get_size(fdma); + fdma->db_size = PAGE_SIZE; + fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb; + fdma->ops.nextptr_cb = &fdma_nextptr_cb; + + /* Fetch a netdev for SKB and NAPI use, any will do */ + for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) { + struct sparx5_port *port = sparx5->ports[idx]; + + if (port && port->ndev) { + sparx5->rx.ndev = port->ndev; + break; + } + } +} + +static void lan969x_fdma_tx_init(struct sparx5 *sparx5) +{ + struct fdma *fdma = &sparx5->tx.fdma; + + fdma->channel_id = FDMA_INJ_CHANNEL; + fdma->n_dcbs = FDMA_DCB_MAX; + fdma->n_dbs = 1; + fdma->priv = sparx5; + fdma->size = fdma_get_size(fdma); + fdma->db_size = 
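
Editorial aside, not part of the patch: the lan969x RX path above hands buffer management to a page_pool, so DMA mapping and recycling are done by the core rather than by the driver. Below is a minimal sketch of that pattern under the same parameters the driver uses (one page per buffer, device-synced recycling); the example_rx_* names are invented for illustration.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

struct example_rx {
        struct page_pool *pool;
};

/* Create a pool whose pages are DMA-mapped for the device up front;
 * PP_FLAG_DMA_SYNC_DEV makes the pool sync recycled pages for device
 * reuse, so refilling a descriptor only needs the pre-mapped address.
 */
static int example_rx_pool_create(struct example_rx *rx, struct device *dev,
                                  unsigned int nbufs)
{
        struct page_pool_params pp = {
                .order     = 0,         /* one full page per RX buffer */
                .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .pool_size = nbufs,
                .nid       = NUMA_NO_NODE,
                .dev       = dev,
                .dma_dir   = DMA_FROM_DEVICE,
                .offset    = 0,
                .max_len   = PAGE_SIZE -
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
        };

        rx->pool = page_pool_create(&pp);
        return IS_ERR(rx->pool) ? PTR_ERR(rx->pool) : 0;
}

/* Point one hardware descriptor at a fresh page from the pool. */
static int example_rx_refill(struct example_rx *rx, struct page **pagep,
                             dma_addr_t *dma)
{
        *pagep = page_pool_dev_alloc_pages(rx->pool);
        if (!*pagep)
                return -ENOMEM;

        *dma = page_pool_get_dma_addr(*pagep);
        return 0;
}

On completion the frame can then be wrapped with build_skb() on the page address and marked with skb_mark_for_recycle() so the page returns to the pool when the skb is freed, which is the approach taken in lan969x_fdma_rx_get_frame() above.
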
PAGE_SIZE; + fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb; + fdma->ops.nextptr_cb = &fdma_nextptr_cb; +} + +int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight) +{ + struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); + struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + int old_dcb, dcb_reload, counter = 0; + struct fdma *fdma = &rx->fdma; + struct sk_buff *skb; + + dcb_reload = fdma->dcb_index; + + lan969x_fdma_tx_clear_buf(sparx5, weight); + + /* Process RX data */ + while (counter < weight) { + if (!fdma_has_frames(fdma)) + break; + + skb = lan969x_fdma_rx_get_frame(sparx5, rx); + if (!skb) + break; + + napi_gro_receive(&rx->napi, skb); + + fdma_db_advance(fdma); + counter++; + /* Check if the DCB can be reused */ + if (fdma_dcb_is_reusable(fdma)) + continue; + + fdma_db_reset(fdma); + fdma_dcb_advance(fdma); + } + + /* Allocate new pages and map them */ + while (dcb_reload != fdma->dcb_index) { + old_dcb = dcb_reload; + dcb_reload++; + /* n_dcbs must be a power of 2 */ + dcb_reload &= fdma->n_dcbs - 1; + + fdma_dcb_add(fdma, + old_dcb, + FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); + + sparx5_fdma_reload(sparx5, fdma); + } + + if (counter < weight && napi_complete_done(napi, counter)) + spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA); + + return counter; +} + +int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb, + struct net_device *dev) +{ + int next_dcb, needed_headroom, needed_tailroom, err; + struct sparx5_tx *tx = &sparx5->tx; + struct fdma *fdma = &tx->fdma; + struct sparx5_tx_buf *db_buf; + u64 status; + + next_dcb = lan969x_fdma_get_next_dcb(tx); + if (next_dcb < 0) + return -EBUSY; + + needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0); + needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) + return err; + } + + skb_push(skb, IFH_LEN * 4); + memcpy(skb->data, ifh, IFH_LEN * 4); + skb_put(skb, ETH_FCS_LEN); + + db_buf = &tx->dbs[next_dcb]; + db_buf->dma_addr = dma_map_single(sparx5->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(sparx5->dev, db_buf->dma_addr)) + return -ENOMEM; + + db_buf->dev = dev; + db_buf->skb = skb; + db_buf->ptp = false; + db_buf->used = true; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + db_buf->ptp = true; + + status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len) | + FDMA_DCB_STATUS_INTR; + + fdma_dcb_advance(fdma); + fdma_dcb_add(fdma, next_dcb, 0, status); + + sparx5_fdma_reload(sparx5, fdma); + + return NETDEV_TX_OK; +} + +int lan969x_fdma_init(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + int err; + + lan969x_fdma_rx_init(sparx5); + lan969x_fdma_tx_init(sparx5); + sparx5_fdma_injection_mode(sparx5); + + err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask"); + return err; + } + + err = lan969x_fdma_rx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n", + err); + return err; + } + + err = lan969x_fdma_tx_alloc(sparx5); + if (err) { + fdma_free_coherent(sparx5->dev, &rx->fdma); + dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n", + err); + return err; + } + + /* Reset FDMA state */ + 
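
Editorial aside, not part of the patch: lan969x_fdma_napi_poll() above follows the standard NAPI contract, consuming at most the budget and only re-arming the hardware interrupt once napi_complete_done() agrees that polling may stop. A condensed, self-contained sketch of that contract follows; the example_* types and helpers are invented stand-ins for the FDMA specifics.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_rx_q {
        struct napi_struct napi;
        /* ... ring state ... */
};

/* Hypothetical device specifics, stubbed out for the sketch. */
static struct sk_buff *example_rx_get_frame(struct example_rx_q *q)
{
        return NULL;    /* would pull the next completed frame */
}

static void example_rx_irq_enable(struct example_rx_q *q)
{
        /* would write the interrupt-enable register */
}

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_rx_q *q = container_of(napi, struct example_rx_q, napi);
        int done = 0;

        while (done < budget) {
                struct sk_buff *skb = example_rx_get_frame(q);

                if (!skb)
                        break;

                napi_gro_receive(napi, skb);
                done++;
        }

        /* If the budget was exhausted, stay scheduled and let the core
         * call the poll function again.  Otherwise, only when
         * napi_complete_done() confirms NAPI went idle, re-enable the
         * RX interrupt (the driver above writes FDMA_INTR_DB_ENA here).
         */
        if (done < budget && napi_complete_done(napi, done))
                example_rx_irq_enable(q);

        return done;
}
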
spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL); + spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL); + + return err; +} + +int lan969x_fdma_deinit(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_tx *tx = &sparx5->tx; + + sparx5_fdma_stop(sparx5); + fdma_free_coherent(sparx5->dev, &tx->fdma); + fdma_free_coherent(sparx5->dev, &rx->fdma); + lan969x_fdma_free_pages(rx); + page_pool_destroy(rx->page_pool); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c new file mode 100644 index 000000000000..4e422ca50828 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip lan969x Switch driver + * + * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries. + */ + +#include "lan969x.h" + +/* Tx clock selectors */ +#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */ +#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */ +#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */ + +/* Port speed selectors */ +#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */ +#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */ +#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */ + +/* Clock delay selectors */ +#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */ +#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */ +#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */ +#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */ +#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */ +#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */ + +#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */ +#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */ +#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */ +#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */ + +#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX) + +/* Get the tx clock selector based on the port speed. */ +static int lan969x_rgmii_get_clk_sel(int speed) +{ + return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ : + speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ : + LAN969X_RGMII_TX_CLK_SEL_125MHZ); +} + +/* Get the port speed selector based on the port speed. */ +static int lan969x_rgmii_get_speed_sel(int speed) +{ + return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 : + speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 : + LAN969X_RGMII_SPEED_SEL_1000); +} + +/* Get the clock delay selector based on the clock delay in picoseconds. */ +static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port, + u32 delay_ps, u32 *clk_delay_sel) +{ + switch (delay_ps) { + case 0: + /* Hardware default selector. 
*/ + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS; + break; + case 1000: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS; + break; + case 1700: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS; + break; + case 2000: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS; + break; + case 2500: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS; + break; + case 3000: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS; + break; + case 3300: + *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS; + break; + default: + dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps); + return -EINVAL; + } + + return 0; +} + +/* Configure the RGMII tx clock frequency. */ +static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed); + u32 idx = RGMII_PORT_IDX(port); + + /* Take the RGMII clock domain out of reset and set tx clock + * frequency. + */ + spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) | + HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) | + HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0), + HSIO_WRAP_RGMII_CFG_TX_CLK_CFG | + HSIO_WRAP_RGMII_CFG_RGMII_TX_RST | + HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, + port->sparx5, HSIO_WRAP_RGMII_CFG(idx)); +} + +/* Configure the RGMII port device. */ +static void lan969x_rgmii_port_device_config(struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port); + + speed_sel = lan969x_rgmii_get_speed_sel(conf->speed); + + etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ? + port->custom_etype : + port->vlan_type == SPX5_VLAN_PORT_TYPE_C ? + ETH_P_8021Q : ETH_P_8021AD); + + dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO; + dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE; + + /* Enable the MAC. */ + spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) | + DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1), + port->sparx5, DEVRGMII_MAC_ENA_CFG(idx)); + + /* Configure the Inter Frame Gap. */ + spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) | + DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) | + DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2), + port->sparx5, DEVRGMII_MAC_IFG_CFG(idx)); + + /* Configure port data rate. */ + spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel), + port->sparx5, DEVRGMII_DEV_RST_CTRL(idx)); + + /* Configure VLAN awareness. */ + spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) | + DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) | + DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) | + DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag), + port->sparx5, + DEVRGMII_MAC_TAGS_CFG(idx)); +} + +/* Configure the RGMII delay lines in the MAC. + * + * We use the rx-internal-delay-ps" and "tx-internal-delay-ps" properties to + * configure the rx and tx delays for the MAC. If these properties are missing + * or set to zero, the MAC will not apply any delay. + * + * The PHY side delays are determined by the PHY mode + * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}), and ignored by the MAC side + * entirely. 
+ */ +static int lan969x_rgmii_delay_config(struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0; + u32 idx = RGMII_PORT_IDX(port); + int err; + + of_property_read_u32(port->of_node, "rx-internal-delay-ps", + &rx_delay_ps); + + of_property_read_u32(port->of_node, "tx-internal-delay-ps", + &tx_delay_ps); + + err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel); + if (err) + return err; + + err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel); + if (err) + return err; + + /* Configure rx delay. */ + spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) | + HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) | + HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) | + HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel), + HSIO_WRAP_DLL_CFG_DLL_RST | + HSIO_WRAP_DLL_CFG_DLL_ENA | + HSIO_WRAP_DLL_CFG_DLL_CLK_ENA | + HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, + port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0)); + + /* Configure tx delay. */ + spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) | + HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) | + HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) | + HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel), + HSIO_WRAP_DLL_CFG_DLL_RST | + HSIO_WRAP_DLL_CFG_DLL_ENA | + HSIO_WRAP_DLL_CFG_DLL_CLK_ENA | + HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, + port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1)); + + return 0; +} + +/* Configure GPIO's to be used as RGMII interface. */ +static void lan969x_rgmii_gpio_config(struct sparx5_port *port) +{ + u32 idx = RGMII_PORT_IDX(port); + + /* Enable the RGMII on the GPIOs. */ + spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5, + HSIO_WRAP_XMII_CFG(!idx)); +} + +int lan969x_port_config_rgmii(struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + int err; + + err = lan969x_rgmii_delay_config(port, conf); + if (err) + return err; + + lan969x_rgmii_tx_clk_config(port, conf); + lan969x_rgmii_gpio_config(port); + lan969x_rgmii_port_device_config(port, conf); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 0027144a2af2..dbe86f937b21 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -18,9 +18,6 @@ #include "sparx5_main.h" #include "sparx5_port.h" -#define FDMA_XTR_CHANNEL 6 -#define FDMA_INJ_CHANNEL 0 - #define FDMA_XTR_BUFFER_SIZE 2048 #define FDMA_WEIGHT 4 @@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t sparx5, FDMA_CH_ACTIVATE); } -static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma) +void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma) { /* Reload the RX channel */ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD); @@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx return true; } -static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) +int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) { struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); @@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) return counter; } -int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb, + struct net_device *dev) { struct sparx5_tx *tx = &sparx5->tx; struct fdma *fdma = 
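
Editorial aside, not part of the patch: lan969x_rgmii_delay_config() above treats a missing "rx-internal-delay-ps" or "tx-internal-delay-ps" property the same as an explicit 0, i.e. no MAC-side delay. A small sketch of that devicetree idiom follows; the function name is invented and the set of accepted values is trimmed for brevity.

#include <linux/errno.h>
#include <linux/of.h>

static int example_read_delay_ps(struct device_node *np, const char *prop,
                                 u32 *delay_ps)
{
        /* Pre-initialise so an absent property reads back as 0; the
         * return value of of_property_read_u32() is ignored on purpose.
         */
        *delay_ps = 0;
        of_property_read_u32(np, prop, delay_ps);

        /* Only a few discrete hardware steps exist; refuse values that
         * cannot be honoured rather than rounding silently.
         */
        switch (*delay_ps) {
        case 0:
        case 2000:
        case 2500:
                return 0;
        default:
                return -EINVAL;
        }
}
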
&tx->fdma; - static bool first_time = true; void *virt_addr; fdma_dcb_advance(fdma); @@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) FDMA_DCB_STATUS_BLOCKO(0) | FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4)); - if (first_time) { - sparx5_fdma_tx_activate(sparx5, tx); - first_time = false; - } else { - sparx5_fdma_reload(sparx5, fdma); - } + sparx5_fdma_reload(sparx5, fdma); + return NETDEV_TX_OK; } @@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), FDMA_DCB_STATUS_INTR); - netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, - FDMA_WEIGHT); - napi_enable(&rx->napi); - sparx5_fdma_rx_activate(sparx5, rx); return 0; } @@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args) return IRQ_HANDLED; } -static void sparx5_fdma_injection_mode(struct sparx5 *sparx5) +void sparx5_fdma_injection_mode(struct sparx5 *sparx5) { const int byte_swap = 1; int portno; @@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5) } } -int sparx5_fdma_start(struct sparx5 *sparx5) +int sparx5_fdma_init(struct sparx5 *sparx5) { int err; @@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5) return err; } +int sparx5_fdma_deinit(struct sparx5 *sparx5) +{ + sparx5_fdma_stop(sparx5); + fdma_free_phys(&sparx5->rx.fdma); + fdma_free_phys(&sparx5->tx.fdma); + + return 0; +} + static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5) { return spx5_rd(sparx5, FDMA_PORT_CTRL(0)); } +int sparx5_fdma_start(struct sparx5 *sparx5) +{ + const struct sparx5_ops *ops = sparx5->data->ops; + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_tx *tx = &sparx5->tx; + + netif_napi_add_weight(rx->ndev, + &rx->napi, + ops->fdma_poll, + FDMA_WEIGHT); + + napi_enable(&rx->napi); + + sparx5_fdma_rx_activate(sparx5, rx); + sparx5_fdma_tx_activate(sparx5, tx); + + return 0; +} + int sparx5_fdma_stop(struct sparx5 *sparx5) { + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_tx *tx = &sparx5->tx; u32 val; - napi_disable(&sparx5->rx.napi); + napi_disable(&rx->napi); + /* Stop the fdma and channel interrupts */ - sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx); - sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx); + sparx5_fdma_rx_deactivate(sparx5, rx); + sparx5_fdma_tx_deactivate(sparx5, tx); + /* Wait for the RX channel to stop */ read_poll_timeout(sparx5_fdma_port_ctrl, val, FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, 500, 10000, 0, sparx5); - fdma_free_phys(&sparx5->rx.fdma); - fdma_free_phys(&sparx5->tx.fdma); + return 0; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index f61aa15beab7..6a0e5b83ecd0 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -313,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5, struct initial_port_config *config) { struct sparx5_port *spx5_port; + const struct sparx5_ops *ops; struct net_device *ndev; struct phylink *phylink; int err; + ops = sparx5->data->ops; + ndev = sparx5_create_netdev(sparx5, config->portno); if (IS_ERR(ndev)) { dev_err(sparx5->dev, "Could not create net device: %02u\n", @@ -357,6 +360,9 @@ static int sparx5_create_port(struct sparx5 *sparx5, MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD; + if (ops->is_port_rgmii(spx5_port->portno)) + 
phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, spx5_port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, @@ -778,15 +784,19 @@ static int sparx5_start(struct sparx5 *sparx5) /* Start Frame DMA with fallback to register based INJ/XTR */ err = -ENXIO; - if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) { - if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) + if (sparx5->fdma_irq >= 0) { + if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 || + !is_sparx5(sparx5)) err = devm_request_irq(sparx5->dev, sparx5->fdma_irq, sparx5_fdma_handler, 0, "sparx5-fdma", sparx5); - if (!err) - err = sparx5_fdma_start(sparx5); + if (!err) { + err = ops->fdma_init(sparx5); + if (!err) + sparx5_fdma_start(sparx5); + } if (err) sparx5->fdma_irq = -ENXIO; } else { @@ -830,6 +840,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) struct initial_port_config *configs, *config; struct device_node *np = pdev->dev.of_node; struct device_node *ports, *portnp; + const struct sparx5_ops *ops; struct reset_control *reset; struct sparx5 *sparx5; int idx = 0, err = 0; @@ -851,6 +862,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) return -EINVAL; regs = sparx5->data->regs; + ops = sparx5->data->ops; /* Do switch core reset if available */ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); @@ -880,7 +892,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) for_each_available_child_of_node(ports, portnp) { struct sparx5_port_config *conf; - struct phy *serdes; + struct phy *serdes = NULL; u32 portno; err = of_property_read_u32(portnp, "reg", &portno); @@ -910,13 +922,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev) conf->sd_sgpio = ~0; else sparx5->sd_sgpio_remapping = true; - serdes = devm_of_phy_get(sparx5->dev, portnp, NULL); - if (IS_ERR(serdes)) { - err = dev_err_probe(sparx5->dev, PTR_ERR(serdes), - "port %u: missing serdes\n", - portno); - of_node_put(portnp); - goto cleanup_config; + /* There is no SerDes node for RGMII ports. 
*/ + if (!ops->is_port_rgmii(portno)) { + serdes = devm_of_phy_get(sparx5->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = dev_err_probe(sparx5->dev, + PTR_ERR(serdes), + "port %u: missing serdes\n", + portno); + of_node_put(portnp); + goto cleanup_config; + } } config->portno = portno; config->node = portnp; @@ -1014,6 +1030,7 @@ cleanup_pnode: static void mchp_sparx5_remove(struct platform_device *pdev) { struct sparx5 *sparx5 = platform_get_drvdata(pdev); + const struct sparx5_ops *ops = sparx5->data->ops; debugfs_remove_recursive(sparx5->debugfs_root); if (sparx5->xtr_irq) { @@ -1025,7 +1042,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev) sparx5->fdma_irq = -ENXIO; } sparx5_ptp_deinit(sparx5); - sparx5_fdma_stop(sparx5); + ops->fdma_deinit(sparx5); sparx5_cleanup_ports(sparx5); sparx5_vcap_destroy(sparx5); /* Unregister netdevs */ @@ -1072,6 +1089,7 @@ static const struct sparx5_ops sparx5_ops = { .is_port_5g = &sparx5_port_is_5g, .is_port_10g = &sparx5_port_is_10g, .is_port_25g = &sparx5_port_is_25g, + .is_port_rgmii = &sparx5_port_is_rgmii, .get_port_dev_index = &sparx5_port_dev_mapping, .get_port_dev_bit = &sparx5_port_dev_mapping, .get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate, @@ -1079,6 +1097,10 @@ static const struct sparx5_ops sparx5_ops = { .set_port_mux = &sparx5_port_mux_set, .ptp_irq_handler = &sparx5_ptp_irq_handler, .dsm_calendar_calc = &sparx5_dsm_calendar_calc, + .fdma_init = &sparx5_fdma_init, + .fdma_deinit = &sparx5_fdma_deinit, + .fdma_poll = &sparx5_fdma_napi_callback, + .fdma_xmit = &sparx5_fdma_xmit, }; static const struct sparx5_match_data sparx5_desc = { diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index d5dd953b0a71..fe7d8bcc0cd9 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -112,6 +112,8 @@ enum sparx5_feature { #define XTR_QUEUE 0 #define INJ_QUEUE 0 +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 #define FDMA_DCB_MAX 64 #define FDMA_RX_DCB_MAX_DBS 15 #define FDMA_TX_DCB_MAX_DBS 1 @@ -157,11 +159,25 @@ struct sparx5_calendar_data { */ struct sparx5_rx { struct fdma fdma; - struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + struct page_pool *page_pool; + union { + struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + }; dma_addr_t dma; struct napi_struct napi; struct net_device *ndev; u64 packets; + u8 page_order; +}; + +/* Used to store information about TX buffers. 
*/ +struct sparx5_tx_buf { + struct net_device *dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool used; + bool ptp; }; /* Frame DMA transmit state: @@ -169,6 +185,7 @@ struct sparx5_rx { */ struct sparx5_tx { struct fdma fdma; + struct sparx5_tx_buf *dbs; u64 packets; u64 dropped; }; @@ -313,6 +330,7 @@ struct sparx5_ops { bool (*is_port_5g)(int portno); bool (*is_port_10g)(int portno); bool (*is_port_25g)(int portno); + bool (*is_port_rgmii)(int portno); u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port); u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port); u32 (*get_hsch_max_group_rate)(int grp); @@ -323,6 +341,13 @@ struct sparx5_ops { irqreturn_t (*ptp_irq_handler)(int irq, void *args); int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi, struct sparx5_calendar_data *data); + int (*port_config_rgmii)(struct sparx5_port *port, + struct sparx5_port_config *conf); + int (*fdma_init)(struct sparx5 *sparx5); + int (*fdma_deinit)(struct sparx5 *sparx5); + int (*fdma_poll)(struct napi_struct *napi, int weight); + int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb, + struct net_device *dev); }; struct sparx5_main_io_resource { @@ -433,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5); void sparx5_port_inj_timer_setup(struct sparx5_port *port); /* sparx5_fdma.c */ +int sparx5_fdma_init(struct sparx5 *sparx5); +int sparx5_fdma_deinit(struct sparx5 *sparx5); int sparx5_fdma_start(struct sparx5 *sparx5); int sparx5_fdma_stop(struct sparx5 *sparx5); -int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb); +int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight); +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb, + struct net_device *dev); irqreturn_t sparx5_fdma_handler(int irq, void *args); +void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma); +void sparx5_fdma_injection_mode(struct sparx5 *sparx5); /* sparx5_mactable.c */ void sparx5_mact_pull_work(struct work_struct *work); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h index 561344f19062..d9ef4ef137b8 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -37,6 +37,7 @@ enum sparx5_target { TARGET_FDMA = 117, TARGET_GCB = 118, TARGET_HSCH = 119, + TARGET_HSIO_WRAP = 120, TARGET_LRN = 122, TARGET_PCEP = 129, TARGET_PCS10G_BR = 132, @@ -54,6 +55,7 @@ enum sparx5_target { TARGET_VCAP_SUPER = 326, TARGET_VOP = 327, TARGET_XQS = 331, + TARGET_DEVRGMII = 392, NUM_TARGETS = 517 }; @@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs; #define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\ FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x) +/* LAN969X ONLY */ +/* HSIOWRAP:XMII_CFG:XMII_CFG */ +#define HSIO_WRAP_XMII_CFG(g) \ + __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4) + +#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1) +#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\ + FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x) +#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\ + FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x) + +/* LAN969X ONLY */ +/* HSIOWRAP:XMII_CFG:RGMII_CFG */ +#define HSIO_WRAP_RGMII_CFG(g) \ + __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4) + +#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2) +#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\ + FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x) +#define 
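
Editorial aside, not part of the patch: the sparx5_ops additions above are what let one driver core serve both Sparx5 and lan969x, with the match data selecting a table of per-SoC function pointers that the hot paths call through. A deliberately stripped-down sketch of that dispatch pattern, with invented types and names:

#include <linux/errno.h>
#include <linux/skbuff.h>

struct example_soc_ops {
        bool (*is_port_rgmii)(int portno);
        int  (*fdma_xmit)(void *priv, struct sk_buff *skb);
};

static bool example_sparx5_is_rgmii(int portno)
{
        return false;                   /* Sparx5 has no RGMII ports */
}

static bool example_lan969x_is_rgmii(int portno)
{
        return portno == 28 || portno == 29;
}

static const struct example_soc_ops example_sparx5_ops = {
        .is_port_rgmii = example_sparx5_is_rgmii,
        /* .fdma_xmit would point at the Sparx5 FDMA transmit path */
};

static const struct example_soc_ops example_lan969x_ops = {
        .is_port_rgmii = example_lan969x_is_rgmii,
        /* .fdma_xmit would point at the page_pool based lan969x path */
};

/* SoC-agnostic caller: behaviour is picked through the table instead of
 * sprinkling per-SoC checks through the hot path.
 */
static int example_xmit(const struct example_soc_ops *ops, void *priv,
                        struct sk_buff *skb)
{
        return ops->fdma_xmit ? ops->fdma_xmit(priv, skb) : -EOPNOTSUPP;
}
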
HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\ + FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x) + +#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1) +#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\ + FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x) +#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\ + FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x) + +#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0) +#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\ + FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x) +#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\ + FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x) + +/* LAN969X ONLY */ +/* HSIOWRAP:XMII_CFG:DLL_CFG */ +#define HSIO_WRAP_DLL_CFG(g, r) \ + __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4) + +#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19) +#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\ + FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x) +#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\ + FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x) + +#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18) +#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\ + FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x) +#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\ + FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x) + +#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15) +#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\ + FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x) +#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\ + FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x) + +#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0) +#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\ + FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x) +#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\ + FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x) + /* LRN:COMMON:COMMON_ACCESS_CTRL */ #define LRN_COMMON_ACCESS_CTRL \ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4) @@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs; #define XQS_CNT(g) \ __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4) +/* LAN969X ONLY */ +/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEVRGMII_DEV_RST_CTRL(t) \ + __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4) + +#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x) +#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x) + +/* LAN969X ONLY */ +/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEVRGMII_MAC_ENA_CFG(t) \ + __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4) + +#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x) +#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x) + +#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x) +#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x) + +/* LAN969X ONLY */ +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEVRGMII_MAC_TAGS_CFG(t) \ + __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4) + +#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x) +#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x) + +#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3) +#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) +#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\ + 
FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) + +#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1) +#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x) +#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\ + FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x) + +#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +/* LAN969X ONLY */ +/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEVRGMII_MAC_IFG_CFG(t) \ + __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4) + +#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x) +#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x) + +#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x) +#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x) + +#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x) +#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x) + #endif /* _SPARX5_MAIN_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index b6f635d85820..138ac58fae51 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) struct net_device_stats *stats = &dev->stats; struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; + const struct sparx5_ops *ops; u32 ifh[IFH_LEN]; netdev_tx_t ret; + ops = sparx5->data->ops; + memset(ifh, 0, IFH_LEN * 4); sparx5_set_port_ifh(sparx5, ifh, port->portno); @@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); spin_lock(&sparx5->tx_lock); if (sparx5->fdma_irq > 0) - ret = sparx5_fdma_xmit(sparx5, ifh, skb); + ret = ops->fdma_xmit(sparx5, ifh, skb, dev); else ret = sparx5_inject(sparx5, ifh, skb, dev); spin_unlock(&sparx5->tx_lock); @@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) if (ret < 0) goto drop; + if (!is_sparx5(sparx5)) + /* When lan969x and TX_OK, stats and SKB consumption is handled + * in the TX completion loop, so dont go any further. + */ + return NETDEV_TX_OK; + stats->tx_bytes += skb->len; stats->tx_packets++; sparx5->tx.packets++; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c index f8562c1a894d..cfb4b2e17ace 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c @@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config, { struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); - return &port->phylink_pcs; + /* Return the PCS for all the modes that require it. 
*/ + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_25GBASER: + return &port->phylink_pcs; + default: + return NULL; + } } static void sparx5_phylink_mac_config(struct phylink_config *config, @@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs) return container_of(pcs, struct sparx5_port, phylink_pcs); } -static void sparx5_pcs_get_state(struct phylink_pcs *pcs, +static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct sparx5_port *port = sparx5_pcs_to_port(pcs); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index f9d1a6bb9bff..04bc8fffaf96 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5, conf->speed != SPEED_25000)) return sparx5_port_error(port, conf, SPX5_PERR_SPEED); break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + if (conf->speed != SPEED_1000 && + conf->speed != SPEED_100 && + conf->speed != SPEED_10) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; default: return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); } @@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5, struct sparx5_port *port, struct sparx5_port_config *conf) { + bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode); bool high_speed_dev = sparx5_is_baser(conf->portmode); const struct sparx5_ops *ops = sparx5->data->ops; int err, urgency, stop_wm; @@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5, if (err) return err; + if (rgmii) { + err = ops->port_config_rgmii(port, conf); + if (err) + return err; + } + /* high speed device is already configured */ - if (!high_speed_dev) + if (!rgmii && !high_speed_dev) sparx5_port_config_low_set(sparx5, port, conf); /* Configure flow control */ @@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5, if (err) return err; - /* Configure MAC vlan awareness */ - err = sparx5_port_max_tags_set(sparx5, port); - if (err) - return err; - - /* Set Max Length */ - spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), - DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, - sparx5, - DEV2G5_MAC_MAXLEN_CFG(port->portno)); - - /* 1G/2G5: Signal Detect configuration */ - spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) | - DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) | - DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena), - sparx5, - DEV2G5_PCS1G_SD_CFG(port->portno)); - /* Set Pause WM hysteresis */ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) | QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) | @@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5, ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, sparx5, ANA_CL_FILTER_CTRL(port->portno)); + if (ops->is_port_rgmii(port->portno)) + return 0; /* RGMII device - nothing more to configure */ + + /* Configure MAC vlan awareness */ + err = sparx5_port_max_tags_set(sparx5, port); + if (err) + return err; + + /* Set Max Length */ + spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), + DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, + sparx5, + DEV2G5_MAC_MAXLEN_CFG(port->portno)); + + /* 1G/2G5: Signal 
Detect configuration */ + spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) | + DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) | + DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena), + sparx5, + DEV2G5_PCS1G_SD_CFG(port->portno)); + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII || conf->portmode == PHY_INTERFACE_MODE_SGMII) { err = sparx5_serdes_set(sparx5, port, conf); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h index 9b9bcc6834bc..c8a37468a3d1 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno) return portno >= 56 && portno <= 63; } +static inline bool sparx5_port_is_rgmii(int portno) +{ + return false; +} + static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port) { const struct sparx5_ops *ops = sparx5->data->ops; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 2dc0c6ad54be..be95336ce089 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1656,9 +1656,9 @@ static int __init mana_driver_init(void) static void __exit mana_driver_exit(void) { - debugfs_remove(mana_debugfs_root); - pci_unregister_driver(&mana_driver); + + debugfs_remove(mana_debugfs_root); } module_init(mana_driver_init); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 3d72aa7b1305..ef93df520887 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1432,7 +1432,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port, memset(ifh, 0, OCELOT_TAG_LEN); ocelot_ifh_set_bypass(ifh, 1); - ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports)); + ocelot_ifh_set_src(ifh, ocelot->num_phys_ports); ocelot_ifh_set_dest(ifh, BIT_ULL(port)); ocelot_ifh_set_qos_class(ifh, qos_class); ocelot_ifh_set_tag_type(ifh, tag_type); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 558e03301aa8..7663d196eaf8 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, bool is_static, void *data) { struct ocelot_dump_ctx *dump = data; + struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct nlmsghdr *nlh; struct ndmsg *ndm; - if (dump->idx < dump->cb->args[2]) + if (dump->idx < ctx->fdb_idx) goto skip; nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, @@ -992,6 +993,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev, return ocelot_get_ts_info(ocelot, port, info); } +static void ocelot_port_ts_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_port_get_ts_stats(ocelot, port, ts_stats); +} + static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_port_get_strings, .get_ethtool_stats = ocelot_port_get_ethtool_stats, @@ -999,6 +1010,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ts_info = ocelot_port_get_ts_info, + .get_ts_stats = 
ocelot_port_ts_stats, }; static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index 808ce8e68d39..cc1088988da0 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -680,9 +680,14 @@ static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port, skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time + OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) { - dev_warn_ratelimited(ocelot->dev, - "port %d invalidating stale timestamp ID %u which seems lost\n", - port, OCELOT_SKB_CB(skb)->ts_id); + u64_stats_update_begin(&ocelot_port->ts_stats->syncp); + ocelot_port->ts_stats->lost++; + u64_stats_update_end(&ocelot_port->ts_stats->syncp); + + dev_dbg_ratelimited(ocelot->dev, + "port %d invalidating stale timestamp ID %u which seems lost\n", + port, OCELOT_SKB_CB(skb)->ts_id); + __skb_unlink(skb, &ocelot_port->tx_skbs); kfree_skb(skb); ocelot->ptp_skbs_in_flight--; @@ -748,13 +753,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, return 0; ptp_class = ptp_classify_raw(skb); - if (ptp_class == PTP_CLASS_NONE) - return -EINVAL; + if (ptp_class == PTP_CLASS_NONE) { + err = -EINVAL; + goto error; + } /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; + + u64_stats_update_begin(&ocelot_port->ts_stats->syncp); + ocelot_port->ts_stats->onestep_pkts_unconfirmed++; + u64_stats_update_end(&ocelot_port->ts_stats->syncp); + return 0; } @@ -764,14 +776,16 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { *clone = skb_clone_sk(skb); - if (!(*clone)) - return -ENOMEM; + if (!(*clone)) { + err = -ENOMEM; + goto error; + } /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone); if (err) { kfree_skb(*clone); - return err; + goto error; } skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS; @@ -780,6 +794,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, } return 0; + +error: + u64_stats_update_begin(&ocelot_port->ts_stats->syncp); + ocelot_port->ts_stats->err++; + u64_stats_update_end(&ocelot_port->ts_stats->syncp); + return err; } EXPORT_SYMBOL(ocelot_port_txtstamp_request); @@ -816,6 +836,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) while (budget--) { struct skb_shared_hwtstamps shhwtstamps; + struct ocelot_port *ocelot_port; u32 val, id, seqid, txport; struct sk_buff *skb_match; struct timespec64 ts; @@ -832,17 +853,27 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); + ocelot_port = ocelot->ports[txport]; /* Retrieve its associated skb */ skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id, seqid); if (!skb_match) { - dev_warn_ratelimited(ocelot->dev, - "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n", - txport, seqid, id); + u64_stats_update_begin(&ocelot_port->ts_stats->syncp); + ocelot_port->ts_stats->err++; + u64_stats_update_end(&ocelot_port->ts_stats->syncp); + + dev_dbg_ratelimited(ocelot->dev, + "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n", + txport, seqid, id); + goto 
next_ts; } + u64_stats_update_begin(&ocelot_port->ts_stats->syncp); + ocelot_port->ts_stats->pkts++; + u64_stats_update_end(&ocelot_port->ts_stats->syncp); + /* Get the h/w timestamp */ ocelot_get_hwtimestamp(ocelot, &ts); diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index c018783757fb..545710dadcf5 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port, } EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats); +void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port, + struct ethtool_ts_stats *ts_stats) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_ts_stats *stats = ocelot_port->ts_stats; + unsigned int start; + + if (!ocelot->ptp) + return; + + do { + start = u64_stats_fetch_begin(&stats->syncp); + ts_stats->pkts = stats->pkts; + ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed; + ts_stats->lost = stats->lost; + ts_stats->err = stats->err; + } while (u64_stats_fetch_retry(&stats->syncp, start)); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats); + void ocelot_port_get_stats64(struct ocelot *ocelot, int port, struct rtnl_link_stats64 *stats) { @@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot) if (!ocelot->stats) return -ENOMEM; + if (ocelot->ptp) { + for (int port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + ocelot_port->ts_stats = devm_kzalloc(ocelot->dev, + sizeof(*ocelot_port->ts_stats), + GFP_KERNEL); + if (!ocelot_port->ts_stats) + return -ENOMEM; + + u64_stats_init(&ocelot_port->ts_stats->syncp); + } + } + snprintf(queue_name, sizeof(queue_name), "%s-stats", dev_name(ocelot->dev)); ocelot->stats_queue = create_singlethread_workqueue(queue_name); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 2ec62c8d86e1..59486fe2ad18 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) struct sk_buff *skb; skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL); + if (!skb) + return NULL; skb_put(skb, size); return skb; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 9d97cd281f18..c03558adda91 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, map_id_full = be64_to_cpu(cbe->map_ptr); map_id = map_id_full; - if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + if (size_add(pkt_size, data_size) > INT_MAX || + len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) return -EINVAL; if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 98e098c09c03..abba165738a3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) break; } - netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); + netdev->watchdog_timeo = secs_to_jiffies(5); /* MTU range: 68 - hw-specific max 
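
Editorial aside, not part of the patch: the new ocelot timestamping counters above are protected with u64_stats_sync so that 32-bit readers cannot observe torn 64-bit values. A minimal writer/reader sketch of that discipline follows; struct and function names are invented, and the syncp is assumed to have been set up once with u64_stats_init() at probe time, as ocelot_stats_init() does above.

#include <linux/u64_stats_sync.h>

struct example_ts_stats {
        u64 pkts;
        u64 lost;
        struct u64_stats_sync syncp;    /* u64_stats_init() at probe */
};

/* Writer side (e.g. a timestamp completion path): effectively free on
 * 64-bit, a seqcount bump on 32-bit.
 */
static void example_ts_stats_lost(struct example_ts_stats *s)
{
        u64_stats_update_begin(&s->syncp);
        s->lost++;
        u64_stats_update_end(&s->syncp);
}

/* Reader side (e.g. an ethtool stats callback): retry until a snapshot
 * is taken without racing an update.
 */
static void example_ts_stats_read(struct example_ts_stats *s,
                                  u64 *pkts, u64 *lost)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *pkts = s->pkts;
                *lost = s->lost;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}
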
*/ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 720f577929db..499e5e39d513 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) } } -static void nv_napi_enable(struct net_device *dev) -{ - struct fe_priv *np = get_nvpriv(dev); - - napi_enable(&np->napi); -} - -static void nv_napi_disable(struct net_device *dev) -{ - struct fe_priv *np = get_nvpriv(dev); - - napi_disable(&np->napi); -} - #define MII_READ (-1) /* mii_rw: read/write a register on the PHY. * @@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) * Changing the MTU is a rare event, it shouldn't matter. */ nv_disable_irq(dev); - nv_napi_disable(dev); + napi_disable(&np->napi); netif_tx_lock_bh(dev); netif_addr_lock(dev); spin_lock(&np->lock); @@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) spin_unlock(&np->lock); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); - nv_napi_enable(dev); + napi_enable(&np->napi); nv_enable_irq(dev); } return 0; @@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev, if (netif_running(dev)) { nv_disable_irq(dev); - nv_napi_disable(dev); + napi_disable(&np->napi); netif_tx_lock_bh(dev); netif_addr_lock(dev); spin_lock(&np->lock); @@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev, spin_unlock(&np->lock); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); - nv_napi_enable(dev); + napi_enable(&np->napi); nv_enable_irq(dev); } return 0; @@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 if (test->flags & ETH_TEST_FL_OFFLINE) { if (netif_running(dev)) { netif_stop_queue(dev); - nv_napi_disable(dev); + napi_disable(&np->napi); netif_tx_lock_bh(dev); netif_addr_lock(dev); spin_lock_irq(&np->lock); @@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 /* restart rx engine */ nv_start_rxtx(dev); netif_start_queue(dev); - nv_napi_enable(dev); + napi_enable(&np->napi); nv_enable_hw_interrupts(dev, np->irqmask); } } @@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev) /* ask for interrupts */ nv_enable_hw_interrupts(dev, np->irqmask); + netdev_lock(dev); spin_lock_irq(&np->lock); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(0, base + NvRegMulticastAddrB); @@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev) ret = nv_update_linkspeed(dev); nv_start_rxtx(dev); netif_start_queue(dev); - nv_napi_enable(dev); + napi_enable_locked(&np->napi); if (ret) { netif_carrier_on(dev); @@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev) round_jiffies(jiffies + STATS_INTERVAL)); spin_unlock_irq(&np->lock); + netdev_unlock(dev); /* If the loopback feature was set while the device was down, make sure * that it's set correctly now. 
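
Editorial aside, not part of the patch: the forcedeth hunks above show the ordering the netdev instance lock imposes on NAPI in this kernel: inside a netdev_lock()/netdev_unlock() section, the _locked variant of napi_enable() has to be used, since the _locked variant is meant for callers that already hold the instance lock. Reduced to its skeleton, with an invented function name and the hardware programming elided:

#include <linux/netdevice.h>

static void example_open_locked_section(struct net_device *dev,
                                        struct napi_struct *napi)
{
        netdev_lock(dev);

        /* ... program rings, multicast filters, link speed, etc. ... */

        /* napi_enable_locked() expects the caller to already hold the
         * netdev instance lock taken above.
         */
        napi_enable_locked(napi);

        netdev_unlock(dev);
}
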
@@ -5632,7 +5620,7 @@ static int nv_close(struct net_device *dev) spin_lock_irq(&np->lock); np->in_shutdown = 1; spin_unlock_irq(&np->lock); - nv_napi_disable(dev); + napi_disable(&np->napi); synchronize_irq(np->pci_dev->irq); del_timer_sync(&np->oom_kick); diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c index f9c0dcd965c2..db200e4ec284 100644 --- a/drivers/net/ethernet/oa_tc6.c +++ b/drivers/net/ethernet/oa_tc6.c @@ -113,6 +113,7 @@ struct oa_tc6 { struct mii_bus *mdiobus; struct spi_device *spi; struct mutex spi_ctrl_lock; /* Protects spi control transfer */ + spinlock_t tx_skb_lock; /* Protects tx skb handling */ void *spi_ctrl_tx_buf; void *spi_ctrl_rx_buf; void *spi_data_tx_buf; @@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6) for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits; used_tx_credits++) { if (!tc6->ongoing_tx_skb) { + spin_lock_bh(&tc6->tx_skb_lock); tc6->ongoing_tx_skb = tc6->waiting_tx_skb; tc6->waiting_tx_skb = NULL; + spin_unlock_bh(&tc6->tx_skb_lock); } if (!tc6->ongoing_tx_skb) break; @@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data) /* This kthread will be waken up if there is a tx skb or mac-phy * interrupt to perform spi transfer with tx chunks. */ - wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb || - tc6->int_flag || + wait_event_interruptible(tc6->spi_wq, tc6->int_flag || + (tc6->waiting_tx_skb && + tc6->tx_credits) || kthread_should_stop()); if (kthread_should_stop()) @@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb) return NETDEV_TX_OK; } + spin_lock_bh(&tc6->tx_skb_lock); tc6->waiting_tx_skb = skb; + spin_unlock_bh(&tc6->tx_skb_lock); /* Wake spi kthread to perform spi transfer */ wake_up_interruptible(&tc6->spi_wq); @@ -1239,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev) tc6->netdev = netdev; SET_NETDEV_DEV(netdev, &spi->dev); mutex_init(&tc6->spi_ctrl_lock); + spin_lock_init(&tc6->tx_skb_lock); /* Set the SPI controller to pump at realtime priority */ tc6->spi->rt = true; diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 1c61390677f7..04f00ea94230 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -18,8 +18,6 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 -#define IONIC_ASIC_TYPE_ELBA 2 - #define DEVCMD_TIMEOUT 5 #define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) @@ -59,7 +57,6 @@ struct ionic { DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); cpumask_var_t *affinity_masks; struct delayed_work doorbell_check_dwork; - struct work_struct nb_work; struct notifier_block nb; struct rw_semaphore vf_op_lock; /* lock for VF operations */ struct ionic_vf *vfs; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 9e42d599840d..57edcde9e6f8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic) idev->phy_cmb_pages = 0; idev->cmb_npages = 0; - destroy_workqueue(ionic->wq); + if (ionic->wq) { + destroy_workqueue(ionic->wq); + ionic->wq = NULL; + } mutex_destroy(&idev->cmb_inuse_lock); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 
b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index dda22fa4448c..a2d4336d2766 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev, 25000baseCR_Full); copper_seen++; break; + case IONIC_XCVR_PID_QSFP_50G_CR2_FC: + case IONIC_XCVR_PID_QSFP_50G_CR2: + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_200G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_400G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full); + copper_seen++; + break; case IONIC_XCVR_PID_SFP_10GBASE_AOC: case IONIC_XCVR_PID_SFP_10GBASE_CU: ethtool_link_ksettings_add_link_mode(ks, supported, @@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); break; + case IONIC_XCVR_PID_QSFP_200G_AOC: + case IONIC_XCVR_PID_QSFP_200G_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_200G_FR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseLR4_ER4_FR4_Full); + break; + case IONIC_XCVR_PID_QSFP_200G_DR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 200000baseDR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_FR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseLR4_ER4_FR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_DR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseDR4_Full); + break; + case IONIC_XCVR_PID_QSFP_400G_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 400000baseSR4_Full); + break; case IONIC_XCVR_PID_SFP_10GBASE_SR: ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); @@ -929,6 +968,7 @@ static int ionic_get_module_info(struct net_device *netdev, break; case SFF8024_ID_QSFP_8436_8636: case SFF8024_ID_QSFP28_8636: + case SFF8024_ID_QSFP_PLUS_CMIS: modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; @@ -961,8 +1001,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev, len = min_t(u32, sizeof(xcvr->sprom), ee->len); do { - memcpy(data, xcvr->sprom, len); - memcpy(tbuf, xcvr->sprom, len); + memcpy(data, &xcvr->sprom[ee->offset], len); + memcpy(tbuf, &xcvr->sprom[ee->offset], len); /* Let's make sure we got a consistent copy */ if (!memcmp(data, tbuf, len)) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 9c85c0706c6e..830c8adbfbee 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, - + IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6, + IONIC_XCVR_PID_QSFP_50G_CR2 = 7, + IONIC_XCVR_PID_QSFP_200G_CR4 = 8, + IONIC_XCVR_PID_QSFP_400G_CR4 = 9, /* Fiber */ IONIC_XCVR_PID_QSFP_100G_AOC = 50, IONIC_XCVR_PID_QSFP_100G_ACC = 51, @@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_SFP_25GBASE_ACC = 71, IONIC_XCVR_PID_SFP_10GBASE_T = 72, IONIC_XCVR_PID_SFP_1000BASE_T = 73, + IONIC_XCVR_PID_QSFP_200G_AOC = 74, + IONIC_XCVR_PID_QSFP_200G_FR4 = 75, + IONIC_XCVR_PID_QSFP_200G_DR4 = 76, + 
IONIC_XCVR_PID_QSFP_200G_SR4 = 77, + IONIC_XCVR_PID_QSFP_200G_ACC = 78, + IONIC_XCVR_PID_QSFP_400G_FR4 = 79, + IONIC_XCVR_PID_QSFP_400G_DR4 = 80, + IONIC_XCVR_PID_QSFP_400G_SR4 = 81, + IONIC_XCVR_PID_QSFP_400G_VR4 = 82, }; /** @@ -1404,6 +1416,8 @@ struct ionic_xcvr_status { */ union ionic_port_config { struct { +#define IONIC_SPEED_400G 400000 /* 400G in Mbps */ +#define IONIC_SPEED_200G 200000 /* 200G in Mbps */ #define IONIC_SPEED_100G 100000 /* 100G in Mbps */ #define IONIC_SPEED_50G 50000 /* 50G in Mbps */ #define IONIC_SPEED_40G 40000 /* 40G in Mbps */ @@ -3209,7 +3223,11 @@ union ionic_adminq_comp { #define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 #define IONIC_DEV_CMD_DONE 0x00000001 -#define IONIC_ASIC_TYPE_CAPRI 0 +#define IONIC_ASIC_TYPE_NONE 0 +#define IONIC_ASIC_TYPE_CAPRI 1 +#define IONIC_ASIC_TYPE_ELBA 2 +#define IONIC_ASIC_TYPE_GIGLIO 3 +#define IONIC_ASIC_TYPE_SALINA 4 /** * struct ionic_doorbell - Doorbell register layout diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 40496587b2b3..7707a9e53c43 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic) lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, le32_to_cpu(lif->identity->eth.min_frame_size)); lif->netdev->max_mtu = - le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; + le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; lif->neqs = ionic->neqs_per_lif; lif->nxqs = ionic->ntxqs_per_lif; @@ -3804,10 +3804,6 @@ err_out_adminq_deinit: return err; } -static void ionic_lif_notify_work(struct work_struct *ws) -{ -} - static void ionic_lif_set_netdev_info(struct ionic_lif *lif) { struct ionic_admin_ctx ctx = { @@ -3858,8 +3854,6 @@ int ionic_lif_register(struct ionic_lif *lif) ionic_lif_register_phc(lif); - INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); - lif->ionic->nb.notifier_call = ionic_lif_notify; err = register_netdevice_notifier(&lif->ionic->nb); @@ -3869,8 +3863,8 @@ int ionic_lif_register(struct ionic_lif *lif) /* only register LIF0 for now */ err = register_netdev(lif->netdev); if (err) { - dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); - ionic_lif_unregister_phc(lif); + dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err); + ionic_lif_unregister(lif); return err; } @@ -3885,7 +3879,6 @@ void ionic_lif_unregister(struct ionic_lif *lif) { if (lif->ionic->nb.notifier_call) { unregister_netdevice_notifier(&lif->ionic->nb); - cancel_work_sync(&lif->ionic->nb_work); lif->ionic->nb.notifier_call = NULL; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 0f817c3f92d8..daf1e82cb76b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code) case IONIC_RC_EQTYPE: case IONIC_RC_EQID: case IONIC_RC_EINVAL: - case IONIC_RC_ENOSUPP: return -EINVAL; + case IONIC_RC_ENOSUPP: + return -EOPNOTSUPP; case IONIC_RC_EPERM: return -EPERM; case IONIC_RC_ENOENT: diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 9cff0a8ffb2c..3383ee1dad14 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2832,7 +2832,7 @@ 
netxen_sysfs_validate_crb(struct netxen_adapter *adapter, static ssize_t netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, static ssize_t netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter, static ssize_t netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, } static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -2946,20 +2946,20 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, static const struct bin_attribute bin_attr_crb = { .attr = { .name = "crb", .mode = 0644 }, .size = 0, - .read = netxen_sysfs_read_crb, - .write = netxen_sysfs_write_crb, + .read_new = netxen_sysfs_read_crb, + .write_new = netxen_sysfs_write_crb, }; static const struct bin_attribute bin_attr_mem = { .attr = { .name = "mem", .mode = 0644 }, .size = 0, - .read = netxen_sysfs_read_mem, - .write = netxen_sysfs_write_mem, + .read_new = netxen_sysfs_read_mem, + .write_new = netxen_sysfs_write_mem, }; static ssize_t netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -3082,7 +3082,7 @@ out: static const struct bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = 0644 }, .size = sizeof(struct netxen_dimm_cfg), - .read = netxen_sysfs_read_dimm, + .read_new = netxen_sysfs_read_dimm, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b45efc272fdb..c7f497c36f66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3358,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) p_ptt, &nvm_info.num_images); if (rc == -EOPNOTSUPP) { DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); + nvm_info.num_images = 0; goto out; } else if (rc || !nvm_info.num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 74125188beb8..c0f20464fd1e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, } static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, } static 
ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, } static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, } static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); @@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter, static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -623,7 +623,7 @@ out: static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter, static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char 
*buf, loff_t offset, size_t size) { @@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { @@ -1195,64 +1195,63 @@ static const struct device_attribute dev_attr_beacon = { static const struct bin_attribute bin_attr_crb = { .attr = { .name = "crb", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_crb, - .write = qlcnic_sysfs_write_crb, + .read_new = qlcnic_sysfs_read_crb, + .write_new = qlcnic_sysfs_write_crb, }; static const struct bin_attribute bin_attr_mem = { .attr = { .name = "mem", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_mem, - .write = qlcnic_sysfs_write_mem, + .read_new = qlcnic_sysfs_read_mem, + .write_new = qlcnic_sysfs_write_mem, }; static const struct bin_attribute bin_attr_npar_config = { .attr = { .name = "npar_config", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_npar_config, - .write = qlcnic_sysfs_write_npar_config, + .read_new = qlcnic_sysfs_read_npar_config, + .write_new = qlcnic_sysfs_write_npar_config, }; static const struct bin_attribute bin_attr_pci_config = { .attr = { .name = "pci_config", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_pci_config, - .write = NULL, + .read_new = qlcnic_sysfs_read_pci_config, }; static const struct bin_attribute bin_attr_port_stats = { .attr = { .name = "port_stats", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_get_port_stats, - .write = qlcnic_sysfs_clear_port_stats, + .read_new = qlcnic_sysfs_get_port_stats, + .write_new = qlcnic_sysfs_clear_port_stats, }; static const struct bin_attribute bin_attr_esw_stats = { .attr = { .name = "esw_stats", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_get_esw_stats, - .write = qlcnic_sysfs_clear_esw_stats, + .read_new = qlcnic_sysfs_get_esw_stats, + .write_new = qlcnic_sysfs_clear_esw_stats, }; static const struct bin_attribute bin_attr_esw_config = { .attr = { .name = "esw_config", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_esw_config, - .write = qlcnic_sysfs_write_esw_config, + .read_new = qlcnic_sysfs_read_esw_config, + .write_new = qlcnic_sysfs_write_esw_config, }; static const struct bin_attribute bin_attr_pm_config = { .attr = { .name = "pm_config", .mode = 0644 }, .size = 0, - .read = qlcnic_sysfs_read_pm_config, - .write = qlcnic_sysfs_write_pm_config, + .read_new = qlcnic_sysfs_read_pm_config, + .write_new = qlcnic_sysfs_write_pm_config, }; static const struct bin_attribute bin_attr_flash = { .attr = { .name = "flash", .mode = 0644 }, .size = 0, - .read = qlcnic_83xx_sysfs_flash_read_handler, - .write = qlcnic_83xx_sysfs_flash_write_handler, + .read_new = qlcnic_83xx_sysfs_flash_read_handler, + .write_new = qlcnic_83xx_sysfs_flash_write_handler, }; #ifdef CONFIG_QLCNIC_HWMON diff --git a/drivers/net/ethernet/realtek/8139too.c 
b/drivers/net/ethernet/realtek/8139too.c index 9ce0e8a64ba8..a73dcaffa8c5 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) if (tmp8 & CmdTxEnb) RTL_W8 (ChipCmd, CmdRxEnb); + netdev_lock(dev); spin_lock_bh(&tp->rx_lock); /* Disable interrupts by clearing the interrupt mask. */ RTL_W16 (IntrMask, 0x0000); @@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) spin_unlock_irq(&tp->lock); /* ...and finally, reset everything */ - napi_enable(&tp->napi); + napi_enable_locked(&tp->napi); rtl8139_hw_start(dev); netif_wake_queue(dev); spin_unlock_bh(&tp->rx_lock); + netdev_unlock(dev); } static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue) diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index be4c9622618d..7a194a8ab989 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -23,7 +23,7 @@ enum mac_version { RTL_GIGA_MAC_VER_08, RTL_GIGA_MAC_VER_09, RTL_GIGA_MAC_VER_10, - RTL_GIGA_MAC_VER_11, + /* support for RTL_GIGA_MAC_VER_11 has been removed */ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */ RTL_GIGA_MAC_VER_14, @@ -71,6 +71,8 @@ enum mac_version { RTL_GIGA_MAC_VER_64, RTL_GIGA_MAC_VER_65, RTL_GIGA_MAC_VER_66, + RTL_GIGA_MAC_VER_70, + RTL_GIGA_MAC_VER_71, RTL_GIGA_MAC_NONE }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 739707a7b40f..5a5eba49c651 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -16,7 +16,6 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/ethtool.h> -#include <linux/hwmon.h> #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/in.h> @@ -57,6 +56,8 @@ #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" #define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw" +#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw" +#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" @@ -104,7 +105,6 @@ static const struct { [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, - [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, @@ -141,8 +141,10 @@ static const struct { /* reserve 62 for CFG_METHOD_4 in the vendor driver */ [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1}, - [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2}, - [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3}, + [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2}, + [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2}, + [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2}, + [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -623,7 +625,6 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -632,6 +633,7 @@ enum rtl_dash_type { RTL_DASH_NONE, RTL_DASH_DP, RTL_DASH_EP, + RTL_DASH_25_BP, 
}; struct rtl8169_private { @@ -708,6 +710,8 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); MODULE_FIRMWARE(FIRMWARE_8125D_1); +MODULE_FIRMWARE(FIRMWARE_8125D_2); +MODULE_FIRMWARE(FIRMWARE_8125BP_2); MODULE_FIRMWARE(FIRMWARE_8126A_2); MODULE_FIRMWARE(FIRMWARE_8126A_3); @@ -1230,7 +1234,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: r8168g_mdio_write(tp, location, val); break; default: @@ -1245,7 +1249,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1360,10 +1364,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp) rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } +static void rtl8125bp_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x18, 0x00); + r8168ep_ocp_write(tp, 0x01, 0x10, 0x01); +} + static void rtl8168_driver_start(struct rtl8169_private *tp) { if (tp->dash_type == RTL_DASH_DP) rtl8168dp_driver_start(tp); + else if (tp->dash_type == RTL_DASH_25_BP) + rtl8125bp_driver_start(tp); else rtl8168ep_driver_start(tp); } @@ -1384,10 +1397,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); } +static void rtl8125bp_driver_stop(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x18, 0x00); + r8168ep_ocp_write(tp, 0x01, 0x10, 0x01); +} + static void rtl8168_driver_stop(struct rtl8169_private *tp) { if (tp->dash_type == RTL_DASH_DP) rtl8168dp_driver_stop(tp); + else if (tp->dash_type == RTL_DASH_25_BP) + rtl8125bp_driver_stop(tp); else rtl8168ep_driver_stop(tp); } @@ -1410,6 +1432,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp) case RTL_DASH_DP: return r8168dp_check_dash(tp); case RTL_DASH_EP: + case RTL_DASH_25_BP: return r8168ep_check_dash(tp); default: return false; @@ -1424,6 +1447,8 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) return RTL_DASH_DP; case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: return RTL_DASH_EP; + case RTL_GIGA_MAC_VER_66: + return RTL_DASH_25_BP; default: return RTL_DASH_NONE; } @@ -1576,7 +1601,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71: r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts); break; default: @@ -2049,7 +2074,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp) tp->tx_lpi_timer = timer_val; r8168_mac_ocp_write(tp, 0xe048, timer_val); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: tp->tx_lpi_timer = timer_val; RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val); break; @@ -2257,10 +2282,14 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) enum mac_version ver; } mac_info[] = { /* 8126A family. 
*/ - { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 }, - { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 }, + { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 }, + { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 }, + + /* 8125BP family. */ + { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 }, /* 8125D family. */ + { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 }, { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 }, /* 8125B family. */ @@ -2336,7 +2365,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168B family. */ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - /* This one is very old and rare, let's see if anybody complains. + /* This one is very old and rare, support has been removed. * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, */ @@ -2528,7 +2557,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | RX_PAUSE_SLOT_ON); break; @@ -2660,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; - case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); @@ -2903,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: rtl_eri_set_bits(tp, 0xd4, 0x0c00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); break; default: @@ -2917,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: rtl_eri_clear_bits(tp, 0xd4, 0x1f00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); break; default: @@ -2943,8 +2972,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) rtl_mod_config5(tp, 0, ASPM_en); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_65: - case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_70: + case RTL_GIGA_MAC_VER_71: val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -2955,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: /* reset ephy tx/rx disable timer */ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); /* chip can trigger L1.2 */ @@ -2967,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) } else { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_71: r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); break; default: @@ -2975,8 +3004,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) } switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_65: - case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_70: + case RTL_GIGA_MAC_VER_71: val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -3696,12 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) /* disable new tx descriptor format */ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); - if (tp->mac_version == RTL_GIGA_MAC_VER_65 || - tp->mac_version == RTL_GIGA_MAC_VER_66) + if (tp->mac_version == RTL_GIGA_MAC_VER_70 || + tp->mac_version == RTL_GIGA_MAC_VER_71) RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02); - if (tp->mac_version == RTL_GIGA_MAC_VER_65 || - tp->mac_version == RTL_GIGA_MAC_VER_66) + if (tp->mac_version == RTL_GIGA_MAC_VER_70 || + tp->mac_version == RTL_GIGA_MAC_VER_71) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); else if (tp->mac_version == RTL_GIGA_MAC_VER_63) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); @@ -3719,8 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); - if (tp->mac_version == RTL_GIGA_MAC_VER_65 || - tp->mac_version == RTL_GIGA_MAC_VER_66) + if (tp->mac_version == RTL_GIGA_MAC_VER_70 || + tp->mac_version == RTL_GIGA_MAC_VER_71) r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000); else r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3804,7 +3833,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, @@ -3840,8 +3868,10 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, [RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d, - [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a, - [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a, + [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d, + [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d, + [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a, + [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a, }; if (hw_configs[tp->mac_version]) @@ -3858,12 +3888,14 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_61: case RTL_GIGA_MAC_VER_64: + case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: for (i = 0xa00; i < 0xb00; i += 4) RTL_W32(tp, i, 0); break; case RTL_GIGA_MAC_VER_63: - case RTL_GIGA_MAC_VER_65: - case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_70: + case RTL_GIGA_MAC_VER_71: for (i = 0xa00; i < 0xa80; i += 4) RTL_W32(tp, i, 0); RTL_W16(tp, INT_CFG1_8125, 0x0000); @@ -4095,7 +4127,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_71: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -4252,7 +4284,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: padto = max_t(unsigned int, padto, ETH_ZLEN); break; default: @@ -4680,12 +4712,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (status & LinkChg) phy_mac_interrupt(tp->phydev); - if (unlikely(status & RxFIFOOver && - tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(tp->dev); - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); - } - rtl_irq_disable(tp); napi_schedule(&tp->napi); out: @@ -4723,8 +4749,6 @@ static void rtl_task(struct work_struct *work) reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); - } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { - rtl_reset_work(tp); } } @@ -5103,9 +5127,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp) if (tp->mac_version <= RTL_GIGA_MAC_VER_06) tp->irq_mask |= SYSErr | RxFIFOOver; - else if (tp->mac_version == RTL_GIGA_MAC_VER_11) - /* special workaround needed */ - tp->irq_mask |= RxFIFOOver; } static int rtl_alloc_irq(struct rtl8169_private *tp) @@ -5281,7 +5302,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: rtl_hw_init_8125(tp); break; default: @@ -5300,7 +5321,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: return JUMBO_7K; /* RTL8168b */ - case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_17: return JUMBO_4K; /* RTL8168c */ @@ -5347,43 +5367,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp) return false; } -static umode_t r8169_hwmon_is_visible(const void *drvdata, - enum hwmon_sensor_types type, - u32 attr, int channel) -{ - return 0444; -} - -static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type, - u32 attr, int channel, long *val) -{ - struct rtl8169_private *tp = dev_get_drvdata(dev); - int val_raw; - - val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff; - if (val_raw >= 512) - val_raw -= 1024; - - *val = 1000 * val_raw / 2; - - return 0; -} - -static const struct hwmon_ops r8169_hwmon_ops = { - .is_visible = r8169_hwmon_is_visible, - .read = r8169_hwmon_read, -}; - -static const struct hwmon_channel_info * const r8169_hwmon_info[] = { - HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), - NULL -}; - -static const struct hwmon_chip_info r8169_hwmon_chip_info = { - .ops = &r8169_hwmon_ops, - .info = r8169_hwmon_info, -}; - static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct rtl8169_private *tp; @@ -5563,12 +5546,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* The temperature sensor is available from RTl8125B */ - if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63) - /* ignore errors */ - devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp, - &r8169_hwmon_chip_info, - NULL); rc = register_netdev(dev); if (rc) return rc; diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 5307c6ff4e25..cf95e579c65d 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ 
b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } -static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) -{ - phy_write(phydev, 0x1f, 0x0001); - phy_set_bits(phydev, 0x16, BIT(0)); - phy_write(phydev, 0x10, 0xf41b); - phy_write(phydev, 0x1f, 0x0000); -} - static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1111,6 +1102,28 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, rtl8125_config_eee_phy(phydev); } +static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x8088); + phy_modify(phydev, 0x17, 0xff00, 0x9000); + phy_write(phydev, 0x16, 0x808f); + phy_modify(phydev, 0x17, 0xff00, 0x9000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800); + + rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); + rtl8125_config_eee_phy(phydev); +} + static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1136,7 +1149,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, @@ -1172,8 +1184,10 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, [RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config, - [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config, - [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config, + [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config, + [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config, + [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config, + [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config, }; if (phy_configs[ver]) diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index dbc3f92eebc4..2bbfcad613ab 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -13,6 +13,7 @@ #define RTASE_HW_VER_906X_7XA 0x00800000 #define RTASE_HW_VER_906X_7XC 0x04000000 #define RTASE_HW_VER_907XD_V1 0x04800000 +#define RTASE_HW_VER_907XD_VA 0x08000000 #define RTASE_RX_DMA_BURST_256 4 #define RTASE_TX_DMA_BURST_UNLIMITED 7 diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index de7f11232593..3bd11cb56294 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1725,6 +1725,7 @@ static int rtase_get_settings(struct net_device *dev, cmd->base.speed = SPEED_5000; break; case RTASE_HW_VER_907XD_V1: + case RTASE_HW_VER_907XD_VA: cmd->base.speed = SPEED_10000; break; } @@ -1827,7 +1828,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp) for (i = 0; i < tp->int_nums; i++) { irq = pci_irq_vector(pdev, i); - if (!irq) { + if (irq < 0) { pci_disable_msix(pdev); return irq; } @@ -1993,6 
+1994,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp) case RTASE_HW_VER_906X_7XA: case RTASE_HW_VER_906X_7XC: case RTASE_HW_VER_907XD_V1: + case RTASE_HW_VER_907XD_VA: ret = 0; break; } @@ -2016,7 +2018,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, SET_NETDEV_DEV(dev, &pdev->dev); ret = pci_enable_device(pdev); - if (ret < 0) + if (ret) goto err_out_free_dev; /* make sure PCI base addr 1 is MMIO */ @@ -2032,7 +2034,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, } ret = pci_request_regions(pdev, KBUILD_MODNAME); - if (ret < 0) + if (ret) goto err_out_disable; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -2108,7 +2110,7 @@ static int rtase_init_one(struct pci_dev *pdev, dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n"); ret = rtase_init_board(pdev, &dev, &ioaddr); - if (ret != 0) + if (ret) return ret; tp = netdev_priv(dev); @@ -2118,7 +2120,7 @@ static int rtase_init_one(struct pci_dev *pdev, /* identify chip attached to board */ ret = rtase_check_mac_version_valid(tp); - if (ret != 0) { + if (ret) { dev_err(&pdev->dev, "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n", tp->hw_ver); @@ -2129,7 +2131,7 @@ static int rtase_init_one(struct pci_dev *pdev, rtase_init_hardware(tp); ret = rtase_alloc_interrupt(pdev, tp); - if (ret < 0) { + if (ret) { dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n"); goto err_out_del_napi; } @@ -2174,7 +2176,7 @@ static int rtase_init_one(struct pci_dev *pdev, netif_carrier_off(dev); ret = register_netdev(dev); - if (ret != 0) + if (ret) goto err_out_free_dma; netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ac0f093f647a..c9f4976a3527 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -3216,10 +3217,15 @@ static int ravb_suspend(struct device *dev) netif_device_detach(ndev); - if (priv->wol_enabled) - return ravb_wol_setup(ndev); + rtnl_lock(); + if (priv->wol_enabled) { + ret = ravb_wol_setup(ndev); + rtnl_unlock(); + return ret; + } ret = ravb_close(ndev); + rtnl_unlock(); if (ret) return ret; @@ -3244,19 +3250,20 @@ static int ravb_resume(struct device *dev) if (!netif_running(ndev)) return 0; + rtnl_lock(); /* If WoL is enabled restore the interface. */ - if (priv->wol_enabled) { + if (priv->wol_enabled) ret = ravb_wol_restore(ndev); - if (ret) - return ret; - } else { + else ret = pm_runtime_force_resume(dev); - if (ret) - return ret; + if (ret) { + rtnl_unlock(); + return ret; } /* Reopening the interface will restore the device to the working state. 
*/ ret = ravb_open(ndev); + rtnl_unlock(); if (ret < 0) goto out_rpm_put; diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index dbbbf024e7ab..84d09a8973b7 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv) /* Forwarding engine block (MFWD) */ static void rswitch_fwd_init(struct rswitch_private *priv) { + u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0); unsigned int i; - /* For ETHA */ - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { - iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i)); + /* Start with empty configuration */ + for (i = 0; i < RSWITCH_NUM_AGENTS; i++) { + /* Disable all port features */ + iowrite32(0, priv->addr + FWPC0(i)); + /* Disallow L3 forwarding and direct descriptor forwarding */ + iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask), + priv->addr + FWPC1(i)); + /* Disallow L2 forwarding */ + iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask), + priv->addr + FWPC2(i)); + /* Disallow port based forwarding */ iowrite32(0, priv->addr + FWPBFC(i)); } - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + /* For enabled ETHA ports, setup port based forwarding */ + rswitch_for_each_enabled_port(priv, i) { + /* Port based forwarding from port i to GWCA port */ + rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV, + FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index))); + /* Within GWCA port, forward to Rx queue for port i */ iowrite32(priv->rdev[i]->rx_queue->index, priv->addr + FWPBFCSDC(GWCA_INDEX, i)); - iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i)); } - /* For GWCA */ - iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index)); - iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index)); - iowrite32(0, priv->addr + FWPBFC(priv->gwca.index)); - iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index)); + /* For GWCA port, allow direct descriptor forwarding */ + rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE); } /* Gateway CPU agent block (GWCA) */ @@ -547,7 +557,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) desc = &gq->ts_ring[gq->ring_size]; desc->desc.die_dt = DT_LINKFIX; rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); - INIT_LIST_HEAD(&priv->gwca.ts_info_list); return 0; } @@ -1003,9 +1012,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv) static void rswitch_ts(struct rswitch_private *priv) { struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; - struct rswitch_gwca_ts_info *ts_info, *ts_info2; struct skb_shared_hwtstamps shhwtstamps; struct rswitch_ts_desc *desc; + struct rswitch_device *rdev; + struct sk_buff *ts_skb; struct timespec64 ts; unsigned int num; u32 tag, port; @@ -1015,23 +1025,28 @@ static void rswitch_ts(struct rswitch_private *priv) dma_rmb(); port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); - tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); - - list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) { - if (!(ts_info->port == port && ts_info->tag == tag)) - continue; - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - ts.tv_sec = __le32_to_cpu(desc->ts_sec); - ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); - shhwtstamps.hwtstamp = timespec64_to_ktime(ts); - skb_tstamp_tx(ts_info->skb, &shhwtstamps); - dev_consume_skb_irq(ts_info->skb); - list_del(&ts_info->list); - kfree(ts_info); - break; - } + if (unlikely(port >= 
RSWITCH_NUM_PORTS)) + goto next; + rdev = priv->rdev[port]; + tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); + if (unlikely(tag >= TS_TAGS_PER_PORT)) + goto next; + ts_skb = xchg(&rdev->ts_skb[tag], NULL); + smp_mb(); /* order rdev->ts_skb[] read before bitmap update */ + clear_bit(tag, rdev->ts_skb_used); + + if (unlikely(!ts_skb)) + goto next; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + ts.tv_sec = __le32_to_cpu(desc->ts_sec); + ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + skb_tstamp_tx(ts_skb, &shhwtstamps); + dev_consume_skb_irq(ts_skb); + +next: gq->cur = rswitch_next_queue_index(gq, true, 1); desc = &gq->ts_ring[gq->cur]; } @@ -1154,9 +1169,9 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_etha_enable_mii(struct rswitch_etha *etha) { - rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, - MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); - rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); + rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT, + FIELD_PREP(MPIC_PSMCS, etha->psmcs) | + FIELD_PREP(MPIC_PSMHT, 0x06)); } static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) @@ -1185,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION); } -static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read, - int phyad, int devad, int regad, int data) +static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read, + unsigned int mmf, unsigned int pda, + unsigned int pra, unsigned int pop, + unsigned int prd) { - int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45; u32 val; int ret; - if (devad == 0xffffffff) - return -ENODEV; - - writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1); - - val = MPSM_PSME | MPSM_MFF_C45; - iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); + val = MPSM_PSME | + FIELD_PREP(MPSM_MFF, mmf) | + FIELD_PREP(MPSM_PDA, pda) | + FIELD_PREP(MPSM_PRA, pra) | + FIELD_PREP(MPSM_POP, pop) | + FIELD_PREP(MPSM_PRD, prd); + iowrite32(val, etha->addr + MPSM); - ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); + ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0); if (ret) return ret; - rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); - if (read) { - writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); - - ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); - if (ret) - return ret; - - ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16; - - rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); - } else { - iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val, - etha->addr + MPSM); - - ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS); + val = ioread32(etha->addr + MPSM); + ret = FIELD_GET(MPSM_PRD, val); } return ret; @@ -1230,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, int regad) { struct rswitch_etha *etha = bus->priv; + int ret; - return rswitch_etha_set_access(etha, true, addr, devad, regad, 0); + ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, + MPSM_POP_ADDRESS, regad); + if (ret) + return ret; + + return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad, + MPSM_POP_READ_C45, 0); } static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, int regad, u16 
val) { struct rswitch_etha *etha = bus->priv; + int ret; + + ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, + MPSM_POP_ADDRESS, regad); + if (ret) + return ret; + + return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, + MPSM_POP_WRITE, val); +} + +static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad) +{ + struct rswitch_etha *etha = bus->priv; - return rswitch_etha_set_access(etha, false, addr, devad, regad, val); + return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad, + MPSM_POP_READ_C22, 0); +} + +static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad, + int regad, u16 val) +{ + struct rswitch_etha *etha = bus->priv; + + return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad, + MPSM_POP_WRITE, val); } /* Call of_node_put(port) after done */ @@ -1324,6 +1357,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev) mii_bus->priv = rdev->etha; mii_bus->read_c45 = rswitch_etha_mii_read_c45; mii_bus->write_c45 = rswitch_etha_mii_write_c45; + mii_bus->read = rswitch_etha_mii_read_c22; + mii_bus->write = rswitch_etha_mii_write_c22; mii_bus->parent = &rdev->priv->pdev->dev; mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); @@ -1544,7 +1579,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) { unsigned int i; - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { phy_exit(priv->rdev[i]->serdes); rswitch_ether_port_deinit_one(priv->rdev[i]); } @@ -1576,8 +1611,9 @@ static int rswitch_open(struct net_device *ndev) static int rswitch_stop(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); - struct rswitch_gwca_ts_info *ts_info, *ts_info2; + struct sk_buff *ts_skb; unsigned long flags; + unsigned int tag; netif_tx_stop_all_queues(ndev); @@ -1594,12 +1630,13 @@ static int rswitch_stop(struct net_device *ndev) if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); - list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { - if (ts_info->port != rdev->port) - continue; - dev_kfree_skb_irq(ts_info->skb); - list_del(&ts_info->list); - kfree(ts_info); + for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); + tag < TS_TAGS_PER_PORT; + tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) { + ts_skb = xchg(&rdev->ts_skb[tag], NULL); + clear_bit(tag, rdev->ts_skb_used); + if (ts_skb) + dev_kfree_skb(ts_skb); } return 0; @@ -1612,20 +1649,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev, desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - struct rswitch_gwca_ts_info *ts_info; + unsigned int tag; - ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); - if (!ts_info) + tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); + if (tag == TS_TAGS_PER_PORT) return false; + smp_mb(); /* order bitmap read before rdev->ts_skb[] write */ + rdev->ts_skb[tag] = skb_get(skb); + set_bit(tag, rdev->ts_skb_used); skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - rdev->ts_tag++; - desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC); - - ts_info->skb = skb_get(skb); - ts_info->port = rdev->port; - ts_info->tag = rdev->ts_tag; - list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list); + desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC); skb_tx_timestamp(skb); } @@ -1920,9 
+1954,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index if (err < 0) goto out_get_params; - if (rdev->priv->gwca.speed < rdev->etha->speed) - rdev->priv->gwca.speed = rdev->etha->speed; - err = rswitch_rxdmac_alloc(ndev); if (err < 0) goto out_rxdmac; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index e020800dcc57..532192cbca4b 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -12,6 +12,7 @@ #define RSWITCH_MAX_NUM_QUEUES 128 +#define RSWITCH_NUM_AGENTS 5 #define RSWITCH_NUM_PORTS 3 #define rswitch_for_each_enabled_port(priv, i) \ for (i = 0; i < RSWITCH_NUM_PORTS; i++) \ @@ -731,28 +732,21 @@ enum rswitch_etha_mode { #define MPIC_LSC_100M 1 #define MPIC_LSC_1G 2 #define MPIC_LSC_2_5G 3 - -#define MDIO_READ_C45 0x03 -#define MDIO_WRITE_C45 0x01 +#define MPIC_PSMCS GENMASK(22, 16) +#define MPIC_PSMHT GENMASK(26, 24) #define MPSM_PSME BIT(0) -#define MPSM_MFF_C45 BIT(2) -#define MPSM_PRD_SHIFT 16 -#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT) - -/* Completion flags */ -#define MMIS1_PAACS BIT(2) /* Address */ -#define MMIS1_PWACS BIT(1) /* Write */ -#define MMIS1_PRACS BIT(0) /* Read */ -#define MMIS1_CLEAR_FLAGS 0xf - -#define MPIC_PSMCS_SHIFT 16 -#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT) -#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT) - -#define MPIC_PSMHT_SHIFT 24 -#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT) -#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT) +#define MPSM_MFF BIT(2) +#define MPSM_MMF_C22 0 +#define MPSM_MMF_C45 1 +#define MPSM_PDA GENMASK(7, 3) +#define MPSM_PRA GENMASK(12, 8) +#define MPSM_POP GENMASK(14, 13) +#define MPSM_POP_ADDRESS 0 +#define MPSM_POP_WRITE 1 +#define MPSM_POP_READ_C22 2 +#define MPSM_POP_READ_C45 3 +#define MPSM_PRD GENMASK(31, 16) #define MLVC_PLV BIT(16) @@ -806,6 +800,7 @@ enum rswitch_gwca_mode { #define CABPPFLC_INIT_VALUE 0x00800080 /* MFWD */ +#define FWPC0(i) (FWPC00 + (i) * 0x10) #define FWPC0_LTHTA BIT(0) #define FWPC0_IP4UE BIT(3) #define FWPC0_IP4TE BIT(4) @@ -819,15 +814,15 @@ enum rswitch_gwca_mode { #define FWPC0_MACHMA BIT(27) #define FWPC0_VLANSA BIT(28) -#define FWPC0(i) (FWPC00 + (i) * 0x10) -#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \ - FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \ - FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \ - FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA) #define FWPC1(i) (FWPC10 + (i) * 0x10) +#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16) #define FWPC1_DDE BIT(0) -#define FWPBFC(i) (FWPBFC0 + (i) * 0x10) +#define FWPC2(i) (FWPC20 + (i) * 0x10) +#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16) + +#define FWPBFC(i) (FWPBFC0 + (i) * 0x10) +#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0) #define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04) @@ -972,14 +967,6 @@ struct rswitch_gwca_queue { }; }; -struct rswitch_gwca_ts_info { - struct sk_buff *skb; - struct list_head list; - - int port; - u8 tag; -}; - #define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32)) struct rswitch_gwca { unsigned int index; @@ -989,14 +976,13 @@ struct rswitch_gwca { struct rswitch_gwca_queue *queues; int num_queues; struct rswitch_gwca_queue ts_queue; - struct list_head ts_info_list; DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES); u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS]; u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS]; - int speed; }; #define NUM_QUEUES_PER_NDEV 2 +#define TS_TAGS_PER_PORT 256 struct 
rswitch_device { struct rswitch_private *priv; struct net_device *ndev; @@ -1004,7 +990,8 @@ struct rswitch_device { void __iomem *addr; struct rswitch_gwca_queue *tx_queue; struct rswitch_gwca_queue *rx_queue; - u8 ts_tag; + struct sk_buff *ts_skb[TS_TAGS_PER_PORT]; + DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT); bool disabled; int port; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8887b8921009..5fc8027c92c7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev) netif_device_detach(ndev); + rtnl_lock(); if (mdp->wol_enabled) ret = sh_eth_wol_setup(ndev); else ret = sh_eth_close(ndev); + rtnl_unlock(); return ret; } @@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev) if (!netif_running(ndev)) return 0; + rtnl_lock(); if (mdp->wol_enabled) ret = sh_eth_wol_restore(ndev); else ret = sh_eth_open(ndev); + rtnl_unlock(); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 4cc7b501135f..ef374a8e05c3 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, (reg) != 0xa1c), \ page) -/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug - * in the BIU means that writes to TIMER_COMMAND[0] invalidate the - * collector register. - */ -static inline void _efx_writed_page_locked(struct efx_nic *efx, - const efx_dword_t *value, - unsigned int reg, - unsigned int page) -{ - unsigned long flags __attribute__ ((unused)); - - if (page == 0) { - spin_lock_irqsave(&efx->biu_lock, flags); - efx_writed(efx, value, efx_paged_reg(efx, page, reg)); - spin_unlock_irqrestore(&efx->biu_lock, flags); - } else { - efx_writed(efx, value, efx_paged_reg(efx, page, reg)); - } -} -#define efx_writed_page_locked(efx, value, reg, page) \ - _efx_writed_page_locked(efx, value, \ - reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ - page) - #endif /* EFX_IO_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 620ba6ef3514..f70a7b7d6345 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -831,6 +831,7 @@ struct efx_arfs_rule { /** * struct efx_async_filter_insertion - Request to asynchronously insert a filter * @net_dev: Reference to the netdevice + * @net_dev_tracker: reference tracker entry for @net_dev * @spec: The filter to insert * @work: Workitem for this request * @rxq_index: Identifies the channel for which this request was made @@ -838,6 +839,7 @@ struct efx_arfs_rule { */ struct efx_async_filter_insertion { struct net_device *net_dev; + netdevice_tracker net_dev_tracker; struct efx_filter_spec spec; struct work_struct work; u16 rxq_index; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index ab358fe13e1d..4cc83203e188 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -897,7 +897,7 @@ static void efx_filter_rfs_work(struct work_struct *data) /* Release references */ clear_bit(slot_idx, &efx->rps_slot_map); - dev_put(req->net_dev); + netdev_put(req->net_dev, &req->net_dev_tracker); } int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, @@ -989,7 +989,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, } /* Queue the request */ - dev_hold(req->net_dev = 
net_dev); + req->net_dev = net_dev; + netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 9785eff10607..2be3bad3c993 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -753,6 +753,7 @@ struct efx_arfs_rule { /** * struct efx_async_filter_insertion - Request to asynchronously insert a filter * @net_dev: Reference to the netdevice + * @net_dev_tracker: reference tracker entry for @net_dev * @spec: The filter to insert * @work: Workitem for this request * @rxq_index: Identifies the channel for which this request was made @@ -760,6 +761,7 @@ struct efx_arfs_rule { */ struct efx_async_filter_insertion { struct net_device *net_dev; + netdevice_tracker net_dev_tracker; struct efx_filter_spec spec; struct work_struct work; u16 rxq_index; diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c index 082e35c6caaa..2839d0e0a9c1 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.c +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -888,7 +888,7 @@ static void efx_filter_rfs_work(struct work_struct *data) /* Release references */ clear_bit(slot_idx, &efx->rps_slot_map); - dev_put(req->net_dev); + netdev_put(req->net_dev, &req->net_dev_tracker); } int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, @@ -980,7 +980,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, } /* Queue the request */ - dev_hold(req->net_dev = net_dev); + req->net_dev = net_dev; + netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c index d90206f27161..c0603f54cec3 100644 --- a/drivers/net/ethernet/sfc/tc_conntrack.c +++ b/drivers/net/ethernet/sfc/tc_conntrack.c @@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data, void *cb_priv); static const struct rhashtable_params efx_tc_ct_zone_ht_params = { - .key_len = offsetof(struct efx_tc_ct_zone, linkage), + .key_len = sizeof_field(struct efx_tc_ct_zone, zone), .key_offset = 0, .head_offset = offsetof(struct efx_tc_ct_zone, linkage), }; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 6658536a4e17..4cc85a36a1ab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -154,6 +154,18 @@ config DWMAC_RZN1 the stmmac device driver. This support can make use of a custom MII converter PCS device. +config DWMAC_S32 + tristate "NXP S32G/S32R GMAC support" + default ARCH_S32 + depends on OF && (ARCH_S32 || COMPILE_TEST) + help + Support for ethernet controller on NXP S32CC SOCs. + + This selects NXP SoC glue layer support for the stmmac + device driver. This driver is used for the S32CC series + SOCs GMAC ethernet controller, ie. S32G2xx, S32G3xx and + S32R45. 
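For readers unfamiliar with the reference-tracker API used in the sfc and sfc/siena hunks above: netdev_hold()/netdev_put() behave like dev_hold()/dev_put() but also record the owner in a netdevice_tracker entry, so a leaked reference can be attributed to the code that took it. A minimal sketch of the pattern, with hypothetical structure and function names (not part of the patch):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Hypothetical container holding a tracked reference to a net_device. */
struct demo_request {
	struct net_device *net_dev;
	netdevice_tracker net_dev_tracker;
};

static struct demo_request *demo_request_alloc(struct net_device *dev)
{
	struct demo_request *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return NULL;

	req->net_dev = dev;
	/* Take the reference and register it with the tracker entry. */
	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
	return req;
}

static void demo_request_free(struct demo_request *req)
{
	/* Release through the same tracker entry that took the reference. */
	netdev_put(req->net_dev, &req->net_dev_tracker);
	kfree(req);
}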
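Similarly, several stmmac glue-driver hunks further down (dwmac-dwc-qos-eth, dwmac-imx, dwmac-intel-plat, dwmac-rk, dwmac-starfive, dwmac-sti, and the new dwmac-s32) replace open-coded speed-to-clock-rate switches with the rgmii_clock() helper. The resulting fix_mac_speed shape is roughly the following sketch; the function and parameter names here are placeholders, not taken from any one driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/phy.h>

/* rgmii_clock() maps SPEED_10/100/1000 to 2.5/25/125 MHz and returns a
 * negative value for unsupported speeds, so one error check replaces the
 * old per-driver switch statement.
 */
static void demo_fix_mac_speed(struct device *dev, struct clk *tx_clk,
			       unsigned int speed)
{
	long rate = rgmii_clock(speed);

	if (rate < 0) {
		dev_err(dev, "invalid speed %u\n", speed);
		return;
	}

	if (clk_set_rate(tx_clk, rate))
		dev_err(dev, "failed to set tx clock to %ld Hz\n", rate);
}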
+ config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_INTEL_SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 2389fd261344..b26f0e79c2b3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o +obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1367fa5c9b8e..e25db747a81a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -257,6 +257,8 @@ struct stmmac_safety_stats { #define CSR_F_150M 150000000 #define CSR_F_250M 250000000 #define CSR_F_300M 300000000 +#define CSR_F_500M 500000000 +#define CSR_F_800M 800000000 #define MAC_CSR_H_FRQ_MASK 0x20 @@ -543,18 +545,8 @@ struct dma_features { #define STMMAC_VLAN_INSERT 0x2 #define STMMAC_VLAN_REPLACE 0x3 -extern const struct stmmac_desc_ops enh_desc_ops; -extern const struct stmmac_desc_ops ndesc_ops; - struct mac_device_info; -extern const struct stmmac_hwtimestamp stmmac_ptp; -extern const struct stmmac_hwtimestamp dwmac1000_ptp; -extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; - -extern const struct ptp_clock_info stmmac_ptp_clock_ops; -extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; - struct mac_link { u32 caps; u32 speed_mask; @@ -641,8 +633,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); -extern const struct stmmac_mode_ops ring_mode_ops; -extern const struct stmmac_mode_ops chain_mode_ops; -extern const struct stmmac_desc_ops dwmac4_desc_ops; - #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 83290e707df5..bd4eb187f8c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -181,24 +181,19 @@ static void dwc_qos_remove(struct platform_device *pdev) static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct tegra_eqos *eqos = priv; - unsigned long rate = 125000000; bool needs_calibration = false; + long rate = 125000000; u32 value; int err; switch (speed) { case SPEED_1000: - needs_calibration = true; - rate = 125000000; - break; - case SPEED_100: needs_calibration = true; - rate = 25000000; - break; + fallthrough; case SPEED_10: - rate = 2500000; + rate = rgmii_clock(speed); break; default: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 641f3cd019a3..20d3a202bb8d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -36,6 +36,8 @@ #define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1) #define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1) #define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0) +#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0) +#define MX93_GPR_CLK_SEL_OFFSET (4) #define DMA_BUS_MODE 0x00001000 #define DMA_BUS_MODE_SFT_RESET (0x1 << 0) @@ -108,13 +110,21 @@ 
imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat) { struct imx_priv_data *dwmac = plat_dat->bsp_priv; - int val; + int val, ret; switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = MX93_GPR_ENET_QOS_INTF_SEL_MII; break; case PHY_INTERFACE_MODE_RMII: + if (dwmac->rmii_refclk_ext) { + ret = regmap_clear_bits(dwmac->intf_regmap, + dwmac->intf_reg_off + + MX93_GPR_CLK_SEL_OFFSET, + MX93_GPR_ENET_QOS_CLK_SEL_MASK); + if (ret) + return ret; + } val = MX93_GPR_ENET_QOS_INTF_SEL_RMII; break; case PHY_INTERFACE_MODE_RGMII: @@ -186,7 +196,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod { struct plat_stmmacenet_data *plat_dat; struct imx_priv_data *dwmac = priv; - unsigned long rate; + long rate; int err; plat_dat = dwmac->plat_dat; @@ -196,17 +206,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII)) return; - switch (speed) { - case SPEED_1000: - rate = 125000000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_10: - rate = 2500000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "invalid speed %u\n", speed); return; } @@ -301,15 +302,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) * is required by i.MX8MP, i.MX93. * is optinoal for i.MX8DXL. */ - dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode"); + dwmac->intf_regmap = + syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1, + &dwmac->intf_reg_off); if (IS_ERR(dwmac->intf_regmap)) return PTR_ERR(dwmac->intf_regmap); - - err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off); - if (err) { - dev_err(dev, "Can't get intf mode reg offset (%d)\n", err); - return err; - } } return err; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index d94f0a150e93..ddee6154d40b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -31,27 +31,13 @@ struct intel_dwmac_data { static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct intel_dwmac *dwmac = priv; - unsigned long rate; + long rate; int ret; - rate = clk_get_rate(dwmac->tx_clk); - - switch (speed) { - case SPEED_1000: - rate = 125000000; - break; - - case SPEED_100: - rate = 25000000; - break; - - case SPEED_10: - rate = 2500000; - break; - - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "Invalid speed\n"); - break; + return; } ret = clk_set_rate(dwmac->tx_clk, rate); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 901a3c1959fa..2a5b38723635 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -777,7 +777,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv) netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err); plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref); - netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate); + netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate); } static int qcom_ethqos_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 8cb374668b74..a4dc89e23a68 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1079,20 +1079,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) { struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; struct device *dev = &bsp_priv->pdev->dev; - unsigned long rate; + long rate; int ret; - switch (speed) { - case 10: - rate = 2500000; - break; - case 100: - rate = 25000000; - break; - case 1000: - rate = 125000000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dev, "unknown speed value for GMAC speed=%d", speed); return; } @@ -1540,20 +1531,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; struct device *dev = &bsp_priv->pdev->dev; - unsigned long rate; + long rate; int ret; - switch (speed) { - case 10: - rate = 2500000; - break; - case 100: - rate = 25000000; - break; - case 1000: - rate = 125000000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dev, "unknown speed value for RGMII speed=%d", speed); return; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c new file mode 100644 index 000000000000..9cc0e5817416 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NXP S32G/R GMAC glue layer + * + * Copyright 2019-2024 NXP + * + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_address.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */ + +/* SoC PHY interface control register */ +#define PHY_INTF_SEL_MII 0x00 +#define PHY_INTF_SEL_SGMII 0x01 +#define PHY_INTF_SEL_RGMII 0x02 +#define PHY_INTF_SEL_RMII 0x08 + +struct s32_priv_data { + void __iomem *ioaddr; + void __iomem *ctrl_sts; + struct device *dev; + phy_interface_t *intf_mode; + struct clk *tx_clk; + struct clk *rx_clk; +}; + +static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac) +{ + writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts); + + dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode)); + + return 0; +} + +static int s32_gmac_init(struct platform_device *pdev, void *priv) +{ + struct s32_priv_data *gmac = priv; + int ret; + + /* Set initial TX interface clock */ + ret = clk_prepare_enable(gmac->tx_clk); + if (ret) { + dev_err(&pdev->dev, "Can't enable tx clock\n"); + return ret; + } + ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M); + if (ret) { + dev_err(&pdev->dev, "Can't set tx clock\n"); + goto err_tx_disable; + } + + /* Set initial RX interface clock */ + ret = clk_prepare_enable(gmac->rx_clk); + if (ret) { + dev_err(&pdev->dev, "Can't enable rx clock\n"); + goto err_tx_disable; + } + ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M); + if (ret) { + dev_err(&pdev->dev, "Can't set rx clock\n"); + goto err_txrx_disable; + } + + /* Set interface mode */ + ret = s32_gmac_write_phy_intf_select(gmac); + if (ret) { + dev_err(&pdev->dev, "Can't set PHY interface mode\n"); + goto err_txrx_disable; + } + + return 0; + +err_txrx_disable: 
+ clk_disable_unprepare(gmac->rx_clk); +err_tx_disable: + clk_disable_unprepare(gmac->tx_clk); + return ret; +} + +static void s32_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct s32_priv_data *gmac = priv; + + clk_disable_unprepare(gmac->tx_clk); + clk_disable_unprepare(gmac->rx_clk); +} + +static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +{ + struct s32_priv_data *gmac = priv; + long tx_clk_rate; + int ret; + + tx_clk_rate = rgmii_clock(speed); + if (tx_clk_rate < 0) { + dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed); + return; + } + + dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate); + ret = clk_set_rate(gmac->tx_clk, tx_clk_rate); + if (ret) + dev_err(gmac->dev, "Can't set tx clock\n"); +} + +static int s32_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat; + struct device *dev = &pdev->dev; + struct stmmac_resources res; + struct s32_priv_data *gmac; + int ret; + + gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return -ENOMEM; + + gmac->dev = &pdev->dev; + + ret = stmmac_get_platform_resources(pdev, &res); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get platform resources\n"); + + plat = devm_stmmac_probe_config_dt(pdev, res.mac); + if (IS_ERR(plat)) + return dev_err_probe(dev, PTR_ERR(plat), + "dt configuration failed\n"); + + /* PHY interface mode control reg */ + gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); + if (IS_ERR(gmac->ctrl_sts)) + return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts), + "S32CC config region is missing\n"); + + /* tx clock */ + gmac->tx_clk = devm_clk_get(&pdev->dev, "tx"); + if (IS_ERR(gmac->tx_clk)) + return dev_err_probe(dev, PTR_ERR(gmac->tx_clk), + "tx clock not found\n"); + + /* rx clock */ + gmac->rx_clk = devm_clk_get(&pdev->dev, "rx"); + if (IS_ERR(gmac->rx_clk)) + return dev_err_probe(dev, PTR_ERR(gmac->rx_clk), + "rx clock not found\n"); + + gmac->intf_mode = &plat->phy_interface; + gmac->ioaddr = res.addr; + + /* S32CC core feature set */ + plat->has_gmac4 = true; + plat->pmt = 1; + plat->flags |= STMMAC_FLAG_SPH_DISABLE; + plat->rx_fifo_size = 20480; + plat->tx_fifo_size = 20480; + + plat->init = s32_gmac_init; + plat->exit = s32_gmac_exit; + plat->fix_mac_speed = s32_fix_mac_speed; + + plat->bsp_priv = gmac; + + return stmmac_pltfr_probe(pdev, plat, &res); +} + +static const struct of_device_id s32_dwmac_match[] = { + { .compatible = "nxp,s32g2-dwmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, s32_dwmac_match); + +static struct platform_driver s32_dwmac_driver = { + .probe = s32_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "s32-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = s32_dwmac_match, + }, +}; +module_platform_driver(s32_dwmac_driver); + +MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>"); +MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 421666279dd3..0a0a363d3730 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -34,24 +34,13 @@ struct starfive_dwmac { static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct starfive_dwmac *dwmac = priv; - unsigned long rate; + long rate; int err; - rate = clk_get_rate(dwmac->clk_tx); - - switch (speed) { - case 
SPEED_1000: - rate = 125000000; - break; - case SPEED_100: - rate = 25000000; - break; - case SPEED_10: - rate = 2500000; - break; - default: + rate = rgmii_clock(speed); + if (rate < 0) { dev_err(dwmac->dev, "invalid speed %u\n", speed); - break; + return; } err = clk_set_rate(dwmac->clk_tx, rate); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index a6ff02d905a9..f25461c292fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -21,10 +21,7 @@ #include "stmmac_platform.h" -#define DWMAC_125MHZ 125000000 #define DWMAC_50MHZ 50000000 -#define DWMAC_25MHZ 25000000 -#define DWMAC_2_5MHZ 2500000 #define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ iface == PHY_INTERFACE_MODE_RGMII_ID || \ @@ -140,7 +137,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) struct sti_dwmac *dwmac = priv; u32 src = dwmac->tx_retime_src; u32 reg = dwmac->ctrl_reg; - u32 freq = 0; + long freq = 0; if (dwmac->interface == PHY_INTERFACE_MODE_MII) { src = TX_RETIME_SRC_TXCLK; @@ -153,19 +150,14 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) } } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { /* On GiGa clk source can be either ext or from clkgen */ - if (spd == SPEED_1000) { - freq = DWMAC_125MHZ; - } else { + freq = rgmii_clock(spd); + + if (spd != SPEED_1000 && freq > 0) /* Switch to clkgen for these speeds */ src = TX_RETIME_SRC_CLKGEN; - if (spd == SPEED_100) - freq = DWMAC_25MHZ; - else if (spd == SPEED_10) - freq = DWMAC_2_5MHZ; - } } - if (src == TX_RETIME_SRC_CLKGEN && freq) + if (src == TX_RETIME_SRC_CLKGEN && freq > 0) clk_set_rate(dwmac->clk, freq); regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK, @@ -207,16 +199,11 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, if (res) dwmac->clk_sel_reg = res->start; - regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); + regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon", + 1, &dwmac->ctrl_reg); if (IS_ERR(regmap)) return PTR_ERR(regmap); - err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg); - if (err) { - dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err); - return err; - } - err = of_get_phy_mode(np, &dwmac->interface); if (err && err != -ENODEV) { dev_err(dev, "Can't get phy-mode\n"); @@ -321,7 +308,6 @@ static void sti_dwmac_remove(struct platform_device *pdev) clk_disable_unprepare(dwmac->clk); } -#ifdef CONFIG_PM_SLEEP static int sti_dwmac_suspend(struct device *dev) { struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev); @@ -341,10 +327,9 @@ static int sti_dwmac_resume(struct device *dev) return stmmac_resume(dev); } -#endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend, - sti_dwmac_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend, + sti_dwmac_resume); static const struct sti_dwmac_of_data stih4xx_dwmac_data = { .fix_retime_src = stih4xx_fix_retime_src, @@ -361,7 +346,7 @@ static struct platform_driver sti_dwmac_driver = { .remove = sti_dwmac_remove, .driver = { .name = "sti-dwmac", - .pm = &sti_dwmac_pm_ops, + .pm = pm_sleep_ptr(&sti_dwmac_pm_ops), .of_match_table = sti_dwmac_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 1e8bac665cc9..1fcb74e9e3ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -419,16 +419,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, } /* Get mode register */ - dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); + dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon", + 1, &dwmac->mode_reg); if (IS_ERR(dwmac->regmap)) return PTR_ERR(dwmac->regmap); - err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg); - if (err) { - dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err); - return err; - } - if (dwmac->ops->is_mp2) return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index 3827997d2132..dc903b846b1b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/iommu.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/module.h> @@ -19,6 +20,8 @@ struct tegra_mgbe { struct reset_control *rst_mac; struct reset_control *rst_pcs; + u32 iommu_sid; + void __iomem *hv; void __iomem *regs; void __iomem *xpcs; @@ -50,7 +53,6 @@ struct tegra_mgbe { #define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 #define MAC_SBD_INTR BIT(2) #define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 -#define MGBE_SID 0x6 static int __maybe_unused tegra_mgbe_suspend(struct device *dev) { @@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev) writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE); /* Program SID */ - writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL); + writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL); value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS); if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) { @@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev) if (IS_ERR(mgbe->xpcs)) return PTR_ERR(mgbe->xpcs); + /* get controller's stream id from iommu property in device tree */ + if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) { + dev_err(mgbe->dev, "failed to get iommu stream id\n"); + return -EINVAL; + } + res.addr = mgbe->regs; res.irq = irq; @@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE); /* Program SID */ - writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL); + writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL); plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index c25781874aa7..9ed8620580a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -27,7 +27,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONFIG); - u32 clk_rate; + unsigned long clk_rate; value |= GMAC_CORE_INIT; @@ -420,10 +420,10 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); } -static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et) +static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et) { void __iomem *ioaddr = hw->pcsr; - int value = et & STMMAC_ET_MAX; + u32 value = et & STMMAC_ET_MAX; int regval; /* Program LPI entry timer value into register */ diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 1ce6f43d545a..806555976496 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -144,4 +144,7 @@ /* TDS3 use for both format (read and write back) */ #define RDES3_OWN BIT(31) +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; +extern const struct stmmac_desc_ops dwmac4_desc_ops; + #endif /* __DWMAC4_DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index a04a79003692..20027d3c25a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -493,4 +493,9 @@ #define XGMAC_RDES3_TSD BIT(6) #define XGMAC_RDES3_TSA BIT(4) +extern const struct stmmac_ops dwxgmac210_ops; +extern const struct stmmac_ops dwxlgmac2_ops; +extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +extern const struct stmmac_desc_ops dwxgmac210_desc_ops; + #endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index a72d336a8350..31bdbab9a46c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -9,6 +9,8 @@ #include "stmmac_fpe.h" #include "stmmac_ptp.h" #include "stmmac_est.h" +#include "dwmac4_descs.h" +#include "dwxgmac2.h" static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) { @@ -265,7 +267,7 @@ static const struct stmmac_hwif_entry { .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = NULL, - .tc = &dwxgmac_tc_ops, + .tc = &dwmac510_tc_ops, .mmc = &dwxgmac_mmc_ops, .est = &dwmac510_est_ops, .setup = dwxgmac2_setup, @@ -288,7 +290,7 @@ static const struct stmmac_hwif_entry { .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = NULL, - .tc = &dwxgmac_tc_ops, + .tc = &dwmac510_tc_ops, .mmc = &dwxgmac_mmc_ops, .est = &dwmac510_est_ops, .setup = dwxlgmac2_setup, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 64f8ed67dcc4..0f200b72c225 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -363,7 +363,7 @@ struct stmmac_ops { void (*set_eee_mode)(struct mac_device_info *hw, bool en_tx_lpi_clockgating); void (*reset_eee_mode)(struct mac_device_info *hw); - void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et); + void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -665,6 +665,15 @@ struct stmmac_regs_off { u32 est_off; }; +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_hwtimestamp dwmac1000_ptp; + +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; + extern const struct stmmac_ops dwmac100_ops; extern const struct stmmac_dma_ops dwmac100_dma_ops; extern const struct stmmac_ops dwmac1000_ops; @@ -676,14 +685,6 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops; extern const struct stmmac_ops dwmac510_ops; extern const struct stmmac_tc_ops dwmac4_tc_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; -extern const struct 
stmmac_tc_ops dwxgmac_tc_ops; -extern const struct stmmac_ops dwxgmac210_ops; -extern const struct stmmac_ops dwxlgmac2_ops; -extern const struct stmmac_dma_ops dwxgmac210_dma_ops; -extern const struct stmmac_desc_ops dwxgmac210_desc_ops; -extern const struct stmmac_mmc_ops dwmac_mmc_ops; -extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; -extern const struct stmmac_est_ops dwmac510_est_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 5d1ea3e07459..1cba39fb2c44 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -139,4 +139,7 @@ struct stmmac_counters { unsigned int mmc_rx_fpe_fragment_cntr; }; +extern const struct stmmac_mmc_ops dwmac_mmc_ops; +extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; + #endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 1d86439b8a14..f05cae103d83 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -126,7 +126,7 @@ struct stmmac_rx_queue { unsigned int cur_rx; unsigned int dirty_rx; unsigned int buf_alloc_num; - u32 rx_zeroc_thresh; + unsigned int napi_skb_frag_size; dma_addr_t dma_rx_phy; u32 rx_tail_addr; unsigned int state_saved; @@ -266,7 +266,6 @@ struct stmmac_priv { int sph_cap; u32 sarc_type; - unsigned int rx_copybreak; u32 rx_riwt[MTL_MAX_TX_QUEUES]; int hwts_rx_en; @@ -307,11 +306,9 @@ struct stmmac_priv { int clk_csr; struct timer_list eee_ctrl_timer; int lpi_irq; - int eee_enabled; - int eee_active; - int tx_lpi_timer; - int tx_lpi_enabled; - int eee_tw_timer; + u32 tx_lpi_timer; + bool eee_enabled; + bool eee_active; bool eee_sw_timer_en; unsigned int mode; unsigned int chain_mode; @@ -407,8 +404,6 @@ void stmmac_dvr_remove(struct device *dev); int stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res); -void stmmac_disable_eee_mode(struct stmmac_priv *priv); -bool stmmac_eee_init(struct stmmac_priv *priv); int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); @@ -418,14 +413,6 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) return !!priv->xdp_prog; } -static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) -{ - if (stmmac_xdp_is_enabled(priv)) - return XDP_PACKET_HEADROOM; - - return 0; -} - void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h index 7a858c566e7e..d247fa383a6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h @@ -62,3 +62,5 @@ #define EST_SRWO BIT(0) #define EST_GCL_DATA 0x00000034 + +extern const struct stmmac_est_ops dwmac510_est_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1d77389ce953..918a32f8fda8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -654,7 +654,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, (*(u32 *)p); } } - if (priv->eee_enabled) { + if (priv->dma_cap.eee) { int val = phylink_get_eee_err(priv->phylink); if (val) priv->xstats.phy_eee_wakeup_error_n = val; @@ -898,9 +898,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev, if (!priv->dma_cap.eee) return -EOPNOTSUPP; - edata->tx_lpi_timer = priv->tx_lpi_timer; - edata->tx_lpi_enabled = priv->tx_lpi_enabled; - return phylink_ethtool_get_eee(priv->phylink, edata); } @@ -908,29 +905,11 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct ethtool_keee *edata) { struct stmmac_priv *priv = netdev_priv(dev); - int ret; if (!priv->dma_cap.eee) return -EOPNOTSUPP; - if (priv->tx_lpi_enabled != edata->tx_lpi_enabled) - netdev_warn(priv->dev, - "Setting EEE tx-lpi is not supported\n"); - - if (!edata->eee_enabled) - stmmac_disable_eee_mode(priv); - - ret = phylink_ethtool_set_eee(priv->phylink, edata); - if (ret) - return ret; - - if (edata->eee_enabled && - priv->tx_lpi_timer != edata->tx_lpi_timer) { - priv->tx_lpi_timer = edata->tx_lpi_timer; - stmmac_eee_init(priv); - } - - return 0; + return phylink_ethtool_set_eee(priv->phylink, edata); } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) @@ -1227,43 +1206,6 @@ static int stmmac_get_ts_info(struct net_device *dev, return ethtool_op_get_ts_info(dev, info); } -static int stmmac_get_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, void *data) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = priv->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int stmmac_set_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, - const void *data) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - priv->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - static int stmmac_get_mm(struct net_device *ndev, struct ethtool_mm_state *state) { @@ -1390,8 +1332,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, .get_channels = stmmac_get_channels, .set_channels = stmmac_set_channels, - .get_tunable = stmmac_get_tunable, - .set_tunable = stmmac_set_tunable, .get_link_ksettings = stmmac_ethtool_get_link_ksettings, .set_link_ksettings = stmmac_ethtool_set_link_ksettings, .get_mm = stmmac_get_mm, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c81ea8cdfe6e..c0ae7db96f46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -77,7 +77,6 @@ module_param(phyaddr, int, 0444); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) -#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) /* Limit to make sure XDP TX and slow path can coexist */ #define STMMAC_XSK_TX_BUDGET_MAX 256 @@ -107,15 +106,13 @@ static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, 0644); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); -#define STMMAC_RX_COPYBREAK 256 - static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); #define 
STMMAC_DEFAULT_LPI_TIMER 1000 -static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; -module_param(eee_timer, int, 0644); +static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER; +module_param(eee_timer, uint, 0644); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) @@ -197,8 +194,6 @@ static void stmmac_verify_args(void) flow_ctrl = FLOW_OFF; if (unlikely((pause < 0) || (pause > 0xffff))) pause = PAUSE_TIME; - if (eee_timer < 0) - eee_timer = STMMAC_DEFAULT_LPI_TIMER; } static void __stmmac_disable_all_queues(struct stmmac_priv *priv) @@ -301,7 +296,7 @@ static void stmmac_global_err(struct stmmac_priv *priv) */ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { - u32 clk_rate; + unsigned long clk_rate; clk_rate = clk_get_rate(priv->plat->stmmac_clk); @@ -325,6 +320,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_150_250M; else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; + else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M)) + priv->clk_csr = STMMAC_CSR_300_500M; + else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M)) + priv->clk_csr = STMMAC_CSR_500_800M; } if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { @@ -391,23 +390,17 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) return dirty; } -static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) +static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv) { - int tx_lpi_timer; + stmmac_set_eee_lpi_timer(priv, priv->hw, 0); +} - /* Clear/set the SW EEE timer flag based on LPI ET enablement */ - priv->eee_sw_timer_en = en ? 0 : 1; - tx_lpi_timer = en ? priv->tx_lpi_timer : 0; - stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); +static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv) +{ + stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer); } -/** - * stmmac_enable_eee_mode - check and enter in LPI mode - * @priv: driver private structure - * Description: this function is to verify and enter in LPI mode in case of - * EEE. - */ -static int stmmac_enable_eee_mode(struct stmmac_priv *priv) +static bool stmmac_eee_tx_busy(struct stmmac_priv *priv) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queue; @@ -417,29 +410,43 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv) struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; if (tx_q->dirty_tx != tx_q->cur_tx) - return -EBUSY; /* still unfinished work */ + return true; /* still unfinished work */ + } + + return false; +} + +static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv) +{ + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); +} + +/** + * stmmac_try_to_start_sw_lpi - check and enter in LPI mode + * @priv: driver private structure + * Description: this function is to verify and enter in LPI mode in case of + * EEE. + */ +static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv) +{ + if (stmmac_eee_tx_busy(priv)) { + stmmac_restart_sw_lpi_timer(priv); + return; } /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) stmmac_set_eee_mode(priv, priv->hw, priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); - return 0; } /** - * stmmac_disable_eee_mode - disable and exit from LPI mode + * stmmac_stop_sw_lpi - stop transmitting LPI * @priv: driver private structure - * Description: this function is to exit and disable EEE in case of - * LPI state is true. 
This is called by the xmit. + * Description: When using software-controlled LPI, stop transmitting LPI state. */ -void stmmac_disable_eee_mode(struct stmmac_priv *priv) +static void stmmac_stop_sw_lpi(struct stmmac_priv *priv) { - if (!priv->eee_sw_timer_en) { - stmmac_lpi_entry_timer_config(priv, 0); - return; - } - stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; @@ -456,25 +463,27 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t) { struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); - if (stmmac_enable_eee_mode(priv)) - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); + stmmac_try_to_start_sw_lpi(priv); } /** * stmmac_eee_init - init EEE * @priv: driver private structure + * @active: indicates whether EEE should be enabled. * Description: * if the GMAC supports the EEE (from the HW cap reg) and the phy device * can also manage EEE, this function enable the LPI state and start related * timer. */ -bool stmmac_eee_init(struct stmmac_priv *priv) +static void stmmac_eee_init(struct stmmac_priv *priv, bool active) { - int eee_tw_timer = priv->eee_tw_timer; + priv->eee_active = active; /* Check if MAC core supports the EEE feature. */ - if (!priv->dma_cap.eee) - return false; + if (!priv->dma_cap.eee) { + priv->eee_enabled = false; + return; + } mutex_lock(&priv->lock); @@ -482,22 +491,24 @@ bool stmmac_eee_init(struct stmmac_priv *priv) if (!priv->eee_active) { if (priv->eee_enabled) { netdev_dbg(priv->dev, "disable EEE\n"); - stmmac_lpi_entry_timer_config(priv, 0); + priv->eee_sw_timer_en = false; + stmmac_disable_hw_lpi_timer(priv); del_timer_sync(&priv->eee_ctrl_timer); - stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); + stmmac_set_eee_timer(priv, priv->hw, 0, + STMMAC_DEFAULT_TWT_LS); if (priv->hw->xpcs) xpcs_config_eee(priv->hw->xpcs, priv->plat->mult_fact_100ns, false); } + priv->eee_enabled = false; mutex_unlock(&priv->lock); - return false; + return; } if (priv->eee_active && !priv->eee_enabled) { - timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, - eee_tw_timer); + STMMAC_DEFAULT_TWT_LS); if (priv->hw->xpcs) xpcs_config_eee(priv->hw->xpcs, priv->plat->mult_fact_100ns, @@ -505,18 +516,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv) } if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { + /* Use hardware LPI mode */ del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; - stmmac_lpi_entry_timer_config(priv, 1); + priv->eee_sw_timer_en = false; + stmmac_enable_hw_lpi_timer(priv); } else { - stmmac_lpi_entry_timer_config(priv, 0); - mod_timer(&priv->eee_ctrl_timer, - STMMAC_LPI_T(priv->tx_lpi_timer)); + /* Use software LPI mode */ + priv->eee_sw_timer_en = true; + stmmac_disable_hw_lpi_timer(priv); + stmmac_restart_sw_lpi_timer(priv); } + priv->eee_enabled = true; + mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); - return true; } /* stmmac_get_tx_hwtstamp - get HW TX timestamps @@ -973,10 +988,8 @@ static void stmmac_mac_link_down(struct phylink_config *config, struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); stmmac_mac_set(priv, priv->ioaddr, false); - priv->eee_active = false; - priv->tx_lpi_enabled = false; - priv->eee_enabled = stmmac_eee_init(priv); - stmmac_set_eee_pls(priv, priv->hw, false); + if (priv->dma_cap.eee) + stmmac_set_eee_pls(priv, priv->hw, false); if (stmmac_fpe_supported(priv)) 
stmmac_fpe_link_state_handle(priv, false); @@ -1083,14 +1096,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, writel(ctrl, priv->ioaddr + MAC_CTRL_REG); stmmac_mac_set(priv, priv->ioaddr, true); - if (phy && priv->dma_cap.eee) { - priv->eee_active = - phy_init_eee(phy, !(priv->plat->flags & - STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; - priv->eee_enabled = stmmac_eee_init(priv); - priv->tx_lpi_enabled = priv->eee_enabled; + if (priv->dma_cap.eee) stmmac_set_eee_pls(priv, priv->hw, true); - } if (stmmac_fpe_supported(priv)) stmmac_fpe_link_state_handle(priv, true); @@ -1099,12 +1106,32 @@ static void stmmac_mac_link_up(struct phylink_config *config, stmmac_hwtstamp_correct_latency(priv, priv); } +static void stmmac_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + stmmac_eee_init(priv, false); +} + +static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + priv->tx_lpi_timer = timer; + stmmac_eee_init(priv, true); + + return 0; +} + static const struct phylink_mac_ops stmmac_phylink_mac_ops = { .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, .mac_link_up = stmmac_mac_link_up, + .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi, + .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi, }; /** @@ -1177,15 +1204,27 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } - if (priv->dma_cap.eee) - phy_support_eee(phydev); - ret = phylink_connect_phy(priv->phylink, phydev); } else { fwnode_handle_put(phy_fwnode); ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); } + if (ret == 0) { + struct ethtool_keee eee; + + /* Configure phylib's copy of the LPI timer. Normally, + * phylink_config.lpi_timer_default would do this, but there is + * a chance that userspace could change the eee_timer setting + * via sysfs before the first open. Thus, preserve existing + * behaviour. + */ + if (!phylink_ethtool_get_eee(priv->phylink, &eee)) { + eee.tx_lpi_timer = priv->tx_lpi_timer; + phylink_ethtool_set_eee(priv->phylink, &eee); + } + } + if (!priv->plat->pmt) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; @@ -1202,6 +1241,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) struct stmmac_mdio_bus_data *mdio_bus_data; int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; + struct phylink_pcs *pcs; struct phylink *phylink; priv->phylink_config.dev = &priv->dev->dev; @@ -1211,6 +1251,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) /* Stmmac always requires an RX clock for hardware initialization */ priv->phylink_config.mac_requires_rxc = true; + if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) + priv->phylink_config.eee_rx_clk_stop_enable = true; + mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) priv->phylink_config.default_an_inband = @@ -1223,8 +1266,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) /* If we have an xpcs, it defines which PHY interfaces are supported. 
*/ if (priv->hw->xpcs) - xpcs_get_interfaces(priv->hw->xpcs, - priv->phylink_config.supported_interfaces); + pcs = xpcs_to_phylink_pcs(priv->hw->xpcs); + else + pcs = priv->hw->phylink_pcs; + + if (pcs) + phy_interface_or(priv->phylink_config.supported_interfaces, + priv->phylink_config.supported_interfaces, + pcs->supported_interfaces); + + if (priv->dma_cap.eee) { + /* Assume all supported interfaces also support LPI */ + memcpy(priv->phylink_config.lpi_interfaces, + priv->phylink_config.supported_interfaces, + sizeof(priv->phylink_config.lpi_interfaces)); + + /* All full duplex speeds above 100Mbps are supported */ + priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | + MAC_100FD; + priv->phylink_config.lpi_timer_default = eee_timer * 1000; + priv->phylink_config.eee_enabled_default = true; + } fwnode = priv->plat->port_node; if (!fwnode) @@ -1307,6 +1369,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv, stmmac_display_tx_rings(priv, dma_conf); } +static unsigned int stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return NET_SKB_PAD; +} + static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; @@ -2003,22 +2073,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); struct page_pool_params pp_params = { 0 }; - unsigned int num_pages; + unsigned int dma_buf_sz_pad, num_pages; unsigned int napi_id; int ret; + dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE); + rx_q->queue_index = queue; rx_q->priv_data = priv; + rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = dma_conf->dma_rx_size; - num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); + pp_params.order = order_base_2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; pp_params.offset = stmmac_rx_offset(priv); - pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + pp_params.max_len = dma_conf->dma_buf_sz; + + if (priv->sph) { + pp_params.offset = 0; + pp_params.max_len += stmmac_rx_offset(priv); + } rx_q->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx_q->page_pool)) { @@ -2757,11 +2836,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, xmits = budget; } - if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && - priv->eee_sw_timer_en) { - if (stmmac_enable_eee_mode(priv)) - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); - } + if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode) + stmmac_restart_sw_lpi_timer(priv); /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) @@ -3436,12 +3512,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) else if (ptp_register) stmmac_ptp_register(priv); - priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; - - /* Convert the timer from msec to usec */ - if (!priv->tx_lpi_timer) - priv->tx_lpi_timer = eee_timer * 1000; - if (priv->use_riwt) { u32 queue; @@ -3908,6 +3978,10 @@ static int __stmmac_open(struct net_device *dev, u32 chan; int ret; + /* Initialise the tx lpi timer, converting from msec to usec */ + if (!priv->tx_lpi_timer) + priv->tx_lpi_timer = eee_timer * 1000; + ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) return ret; @@ -3923,8 +3997,6 @@ static int __stmmac_open(struct net_device *dev, } } - priv->rx_copybreak = STMMAC_RX_COPYBREAK; - buf_sz = dma_conf->dma_buf_sz; for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) @@ -4024,11 +4096,6 @@ static int stmmac_release(struct net_device *dev) /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); - if (priv->eee_enabled) { - priv->tx_path_in_lpi_mode = false; - del_timer_sync(&priv->eee_ctrl_timer); - } - /* Stop TX/RX DMA and clear the descriptors */ stmmac_stop_all_dma(priv); @@ -4117,11 +4184,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, desc = &tx_q->dma_tx[tx_q->cur_tx]; curr_addr = des + (total_len - tmp_len); - if (priv->dma_cap.addr64 <= 32) - desc->des0 = cpu_to_le32(curr_addr); - else - stmmac_set_desc_addr(priv, desc, curr_addr); - + stmmac_set_desc_addr(priv, desc, curr_addr); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len; @@ -4167,17 +4230,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) * First Descriptor * -------- * | DES0 |---> buffer1 = L2/L3/L4 header - * | DES1 |---> TCP Payload (can continue on next descr...) - * | DES2 |---> buffer 1 and 2 len + * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address + * | | width is 32-bit, but we never use it. + * | | Also can be used as the most-significant 8-bits or 16-bits of + * | | buffer1 address pointer if the DMA AXI address width is 40-bit + * | | or 48-bit, and we always use it. + * | DES2 |---> buffer1 len * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] * -------- + * -------- + * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...) + * | DES1 |---> same as the First Descriptor + * | DES2 |---> buffer1 len + * | DES3 | + * -------- * | * ... 
* | * -------- - * | DES0 | --| Split TCP Payload on Buffers 1 and 2 - * | DES1 | --| - * | DES2 | --> buffer 1 and 2 len + * | DES0 |---> buffer1 = Split TCP Payload + * | DES1 |---> same as the First Descriptor + * | DES2 |---> buffer1 len * | DES3 | * -------- * @@ -4187,15 +4260,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int tmp_pay_len = 0, first_tx, nfrags; unsigned int first_entry, tx_packets; struct stmmac_txq_stats *txq_stats; struct stmmac_tx_queue *tx_q; u32 pay_len, mss, queue; - dma_addr_t tso_des, des; + int i, first_tx, nfrags; u8 proto_hdr_len, hdr; + dma_addr_t des; bool set_ic; - int i; /* Always insert VLAN tag to SKB payload for TSO frames. * @@ -4280,24 +4352,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; - if (priv->dma_cap.addr64 <= 32) { - first->des0 = cpu_to_le32(des); - - /* Fill start of payload in buff2 of first descriptor */ - if (pay_len) - first->des1 = cpu_to_le32(des + proto_hdr_len); - - /* If needed take extra descriptors to fill the remaining payload */ - tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; - tso_des = des; - } else { - stmmac_set_desc_addr(priv, first, des); - tmp_pay_len = pay_len; - tso_des = des + proto_hdr_len; - pay_len = 0; - } - - stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue); + stmmac_set_desc_addr(priv, first, des); + stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len, + (nfrags == 0), queue); /* In case two or more DMA transmit descriptors are allocated for this * non-paged SKB data, the DMA buffer address should be saved to @@ -4401,11 +4458,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } /* Complete the first descriptor before granting the DMA */ - stmmac_prepare_tso_tx_desc(priv, first, 1, - proto_hdr_len, - pay_len, - 1, tx_q->tx_skbuff_dma[first_entry].last_segment, - hdr / 4, (skb->len - proto_hdr_len)); + stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1, + tx_q->tx_skbuff_dma[first_entry].last_segment, + hdr / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ if (mss_desc) { @@ -4492,7 +4547,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) - stmmac_disable_eee_mode(priv); + stmmac_stop_sw_lpi(priv); /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { @@ -5473,7 +5528,7 @@ read_again: if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { - page_pool_recycle_direct(rx_q->page_pool, buf->page); + page_pool_put_page(rx_q->page_pool, buf->page, 0, true); buf->page = NULL; error = 1; if (!priv->hwts_rx_en) @@ -5491,10 +5546,6 @@ read_again: /* Buffer is good. Go on. 
*/ - prefetch(page_address(buf->page) + buf->page_offset); - if (buf->sec_page) - prefetch(page_address(buf->sec_page)); - buf1_len = stmmac_rx_buf1_len(priv, p, status, len); len += buf1_len; buf2_len = stmmac_rx_buf2_len(priv, p, status, len); @@ -5516,6 +5567,8 @@ read_again: dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); + net_prefetch(page_address(buf->page) + + buf->page_offset); xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); xdp_prepare_buff(&ctx.xdp, page_address(buf->page), @@ -5569,22 +5622,26 @@ read_again: } if (!skb) { + unsigned int head_pad_len; + /* XDP program may expand or reduce tail */ buf1_len = ctx.xdp.data_end - ctx.xdp.data; - skb = napi_alloc_skb(&ch->rx_napi, buf1_len); + skb = napi_build_skb(page_address(buf->page), + rx_q->napi_skb_frag_size); if (!skb) { + page_pool_recycle_direct(rx_q->page_pool, + buf->page); rx_dropped++; count++; goto drain_data; } /* XDP program may adjust header */ - skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); + head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start; + skb_reserve(skb, head_pad_len); skb_put(skb, buf1_len); - - /* Data payload copied into SKB, page ready for recycle */ - page_pool_recycle_direct(rx_q->page_pool, buf->page); + skb_mark_for_recycle(skb); buf->page = NULL; } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, @@ -5592,9 +5649,6 @@ read_again: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - skb_mark_for_recycle(skb); buf->page = NULL; } @@ -5604,9 +5658,6 @@ read_again: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - skb_mark_for_recycle(skb); buf->sec_page = NULL; } @@ -6489,11 +6540,7 @@ static int stmmac_device_event(struct notifier_block *unused, switch (event) { case NETDEV_CHANGENAME: - if (priv->dbgfs_dir) - priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, - priv->dbgfs_dir, - stmmac_fs_dir, - dev->name); + debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); break; } done: @@ -7175,6 +7222,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv) if (priv->dma_cap.tsoen) dev_info(priv->device, "TSO supported\n"); + if (priv->dma_cap.number_rx_queues && + priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) { + dev_warn(priv->device, + "Number of Rx queues (%u) exceeds dma capability\n", + priv->plat->rx_queues_to_use); + priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues; + } + if (priv->dma_cap.number_tx_queues && + priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) { + dev_warn(priv->device, + "Number of Tx queues (%u) exceeds dma capability\n", + priv->plat->tx_queues_to_use); + priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues; + } + + if (priv->dma_cap.rx_fifo_size && + priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) { + dev_warn(priv->device, + "Rx FIFO size (%u) exceeds dma capability\n", + priv->plat->rx_fifo_size); + priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size; + } + if (priv->dma_cap.tx_fifo_size && + priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) { + dev_warn(priv->device, + "Tx FIFO size (%u) exceeds dma capability\n", + priv->plat->tx_fifo_size); + priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size; + } + priv->hw->vlan_fail_q_en = (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; @@ -7407,6 +7484,8 @@ int 
stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); + /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -7714,7 +7793,7 @@ int stmmac_suspend(struct device *dev) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - if (priv->eee_enabled) { + if (priv->eee_sw_timer_en) { priv->tx_path_in_lpi_mode = false; del_timer_sync(&priv->eee_ctrl_timer); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3ac32444e492..d0e61aa1a495 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -406,22 +406,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np) } /** - * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() - * @pdev: platform_device structure - * @plat: driver data platform structure - * - * Release resources claimed by stmmac_probe_config_dt(). - */ -static void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) -{ - clk_disable_unprepare(plat->stmmac_clk); - clk_disable_unprepare(plat->pclk); - of_node_put(plat->phy_node); - of_node_put(plat->mdio_node); -} - -/** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure * @mac: MAC address to use @@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); rc = stmmac_mdio_setup(plat, np, &pdev->dev); - if (rc) - return ERR_PTR(rc); + if (rc) { + ret = ERR_PTR(rc); + goto error_put_phy; + } of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); @@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - stmmac_remove_config_dt(pdev, plat); - return ERR_PTR(-ENOMEM); + ret = ERR_PTR(-ENOMEM); + goto error_put_mdio; } plat->dma_cfg = dma_cfg; @@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) rc = stmmac_mtl_setup(pdev, plat); if (rc) { - stmmac_remove_config_dt(pdev, plat); - return ERR_PTR(rc); + ret = ERR_PTR(rc); + goto error_put_mdio; } /* clock setup */ @@ -640,7 +626,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dev_info(&pdev->dev, "PTP uses main clock\n"); } else { plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); - dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); + dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate); } plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev, @@ -663,6 +649,10 @@ error_hw_init: clk_disable_unprepare(plat->pclk); error_pclk_get: clk_disable_unprepare(plat->stmmac_clk); +error_put_mdio: + of_node_put(plat->mdio_node); +error_put_phy: + of_node_put(plat->phy_node); return ret; } @@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data) { struct plat_stmmacenet_data *plat = data; - /* Platform data argument is unused */ - stmmac_remove_config_dt(NULL, plat); + clk_disable_unprepare(plat->stmmac_clk); + clk_disable_unprepare(plat->pclk); + of_node_put(plat->mdio_node); + of_node_put(plat->phy_node); } /** * devm_stmmac_probe_config_dt * @pdev: platform_device structure * @mac: MAC address to use - * Description: Devres variant of 
stmmac_probe_config_dt(). Does not require - * the user to call stmmac_remove_config_dt() at driver detach. + * Description: Devres variant of stmmac_probe_config_dt(). */ struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 4cc70480ce0f..3fe0e3a80e80 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time); void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv); +extern const struct ptp_clock_info stmmac_ptp_clock_ops; +extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 6a79e6a111ed..694d6ee14381 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -1284,14 +1284,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = { .query_caps = tc_query_caps, .setup_mqprio = tc_setup_dwmac510_mqprio, }; - -const struct stmmac_tc_ops dwxgmac_tc_ops = { - .init = tc_init, - .setup_cls_u32 = tc_setup_cls_u32, - .setup_cbs = tc_setup_cbs, - .setup_cls = tc_setup_cls, - .setup_taprio = tc_setup_taprio, - .setup_etf = tc_setup_etf, - .query_caps = tc_query_caps, - .setup_mqprio = tc_setup_dwmac510_mqprio, -}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h index 896dc987d4ef..77ce8cfbe976 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h @@ -4,7 +4,6 @@ #ifndef _STMMAC_XDP_H_ #define _STMMAC_XDP_H_ -#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) #define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index df6d35d41b97..72177fea1cfb 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, addr &= PAGE_MASK; pp = &rp->rxhash[h]; for (; (p = *pp) != NULL; pp = &niu_next_page(p)) { - if (p->index == addr) { + if (p->private == addr) { *link = pp; goto found; } @@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) { unsigned int h = niu_hash_rxaddr(rp, base); - page->index = base; + page->private = base; niu_next_page(page) = rp->rxhash[h]; rp->rxhash[h] = page; } @@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; - if ((page->index + PAGE_SIZE) - rcr_size == addr) { + if ((page->private + PAGE_SIZE) - rcr_size == addr) { *link = niu_next_page(page); - np->ops->unmap_page(np->device, page->index, + np->ops->unmap_page(np->device, page->private, PAGE_SIZE, DMA_FROM_DEVICE); - page->index = 0; + page->private = 0; niu_next_page(page) = NULL; __free_page(page); rp->rbr_refill_pending++; @@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, append_size = append_size - skb->len; 
niu_rx_skb_append(skb, page, off, append_size, rcr_size); - if ((page->index + rp->rbr_block_size) - rcr_size == addr) { + if ((page->private + rp->rbr_block_size) - rcr_size == addr) { *link = niu_next_page(page); - np->ops->unmap_page(np->device, page->index, + np->ops->unmap_page(np->device, page->private, PAGE_SIZE, DMA_FROM_DEVICE); - page->index = 0; + page->private = 0; niu_next_page(page) = NULL; rp->rbr_refill_pending++; } else @@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) page = rp->rxhash[i]; while (page) { struct page *next = niu_next_page(page); - u64 base = page->index; + u64 base = page->private; np->ops->unmap_page(np->device, base, PAGE_SIZE, DMA_FROM_DEVICE); - page->index = 0; + page->private = 0; niu_next_page(page) = NULL; __free_page(page); @@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np) int i; for (i = 0; i < np->num_ldg; i++) - napi_enable(&np->ldg[i].napi); + napi_enable_locked(&np->ldg[i].napi); } static void niu_disable_napi(struct niu *np) @@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev) if (err) goto out_free_channels; + netdev_lock(dev); niu_enable_napi(np); + netdev_unlock(dev); spin_lock_irq(&np->lock); @@ -6460,7 +6462,7 @@ static void niu_reset_buffers(struct niu *np) page = rp->rxhash[j]; while (page) { struct page *next = niu_next_page(page); - u64 base = page->index; + u64 base = page->private; base = base >> RBR_DESCR_ADDR_SHIFT; rp->rbr[k++] = cpu_to_le32(base); page = next; @@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work) niu_reset_buffers(np); + netdev_lock(np->dev); spin_lock_irqsave(&np->lock, flags); err = niu_init_hw(np); @@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work) } spin_unlock_irqrestore(&np->lock, flags); + netdev_unlock(np->dev); } static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -6761,7 +6765,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu) niu_free_channels(np); + netdev_lock(dev); niu_enable_napi(np); + netdev_unlock(dev); err = niu_alloc_channels(np); if (err) @@ -9937,6 +9943,7 @@ static int __maybe_unused niu_resume(struct device *dev_d) spin_lock_irqsave(&np->lock, flags); + netdev_lock(dev); err = niu_init_hw(np); if (!err) { np->timer.expires = jiffies + HZ; @@ -9945,6 +9952,7 @@ static int __maybe_unused niu_resume(struct device *dev_d) } spin_unlock_irqrestore(&np->lock, flags); + netdev_unlock(dev); return err; } diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index 721d8ed3f302..5e0e4c9ecbb0 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = spl2sw_ethernet_start_xmit, .ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode, .ndo_set_mac_address = spl2sw_ethernet_set_mac_address, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = spl2sw_ethernet_tx_timeout, }; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 14e1df721f2e..2806238629f8 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -32,6 +32,7 @@ #include <linux/dma/ti-cppi5.h> #include <linux/dma/k3-udma-glue.h> #include <net/page_pool/helpers.h> +#include <net/dsa.h> #include <net/switchdev.h> #include "cpsw_ale.h" @@ -497,35 +498,62 @@ static void 
am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common); static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common); static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port); static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port); +static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, + struct page *page, + bool allow_direct); +static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma); +static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma); -static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common) +static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct am65_cpsw_rx_flow *flow; struct xdp_rxq_info *rxq; - int id, port; + int port; - for (id = 0; id < common->rx_ch_num_flows; id++) { - flow = &rx_chn->flows[id]; + flow = &rx_chn->flows[id]; + napi_disable(&flow->napi_rx); + hrtimer_cancel(&flow->rx_hrtimer); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn, + am65_cpsw_nuss_rx_cleanup, !!id); - for (port = 0; port < common->port_num; port++) { - if (!common->ports[port].ndev) - continue; + for (port = 0; port < common->port_num; port++) { + if (!common->ports[port].ndev) + continue; - rxq = &common->ports[port].xdp_rxq[id]; + rxq = &common->ports[port].xdp_rxq[id]; - if (xdp_rxq_info_is_reg(rxq)) - xdp_rxq_info_unreg(rxq); - } + if (xdp_rxq_info_is_reg(rxq)) + xdp_rxq_info_unreg(rxq); + } - if (flow->page_pool) { - page_pool_destroy(flow->page_pool); - flow->page_pool = NULL; - } + if (flow->page_pool) { + page_pool_destroy(flow->page_pool); + flow->page_pool = NULL; + } +} + +static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common) +{ + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + int id; + + reinit_completion(&common->tdown_complete); + k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true); + + if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) { + id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000)); + if (!id) + dev_err(common->dev, "rx teardown timeout\n"); } + + for (id = common->rx_ch_num_flows - 1; id >= 0; id--) + am65_cpsw_destroy_rxq(common, id); + + k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn); } -static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common) +static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct page_pool_params pp_params = { @@ -540,45 +568,162 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common) struct am65_cpsw_rx_flow *flow; struct xdp_rxq_info *rxq; struct page_pool *pool; - int id, port, ret; + struct page *page; + int port, ret, i; + + flow = &rx_chn->flows[id]; + pp_params.napi = &flow->napi_rx; + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) { + ret = PTR_ERR(pool); + return ret; + } + + flow->page_pool = pool; + + /* using same page pool is allowed as no running rx handlers + * simultaneously for both ndevs + */ + for (port = 0; port < common->port_num; port++) { + if (!common->ports[port].ndev) + /* FIXME should we BUG here? 
*/ + continue; + + rxq = &common->ports[port].xdp_rxq[id]; + ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev, + id, flow->napi_rx.napi_id); + if (ret) + goto err; + + ret = xdp_rxq_info_reg_mem_model(rxq, + MEM_TYPE_PAGE_POOL, + pool); + if (ret) + goto err; + } + + for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) { + page = page_pool_dev_alloc_pages(flow->page_pool); + if (!page) { + dev_err(common->dev, "cannot allocate page in flow %d\n", + id); + ret = -ENOMEM; + goto err; + } + + ret = am65_cpsw_nuss_rx_push(common, page, id); + if (ret < 0) { + dev_err(common->dev, + "cannot submit page to rx channel flow %d, error %d\n", + id, ret); + am65_cpsw_put_page(flow, page, false); + goto err; + } + } + + napi_enable(&flow->napi_rx); + return 0; + +err: + am65_cpsw_destroy_rxq(common, id); + return ret; +} + +static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common) +{ + int id, ret; for (id = 0; id < common->rx_ch_num_flows; id++) { - flow = &rx_chn->flows[id]; - pp_params.napi = &flow->napi_rx; - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) { - ret = PTR_ERR(pool); + ret = am65_cpsw_create_rxq(common, id); + if (ret) { + dev_err(common->dev, "couldn't create rxq %d: %d\n", + id, ret); goto err; } + } + + ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn); + if (ret) { + dev_err(common->dev, "couldn't enable rx chn: %d\n", ret); + goto err; + } - flow->page_pool = pool; + return 0; - /* using same page pool is allowed as no running rx handlers - * simultaneously for both ndevs - */ - for (port = 0; port < common->port_num; port++) { - if (!common->ports[port].ndev) - continue; +err: + for (--id; id >= 0; id--) + am65_cpsw_destroy_rxq(common, id); + + return ret; +} + +static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id) +{ + struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id]; + + napi_disable(&tx_chn->napi_tx); + hrtimer_cancel(&tx_chn->tx_hrtimer); + k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn, + am65_cpsw_nuss_tx_cleanup); + k3_udma_glue_disable_tx_chn(tx_chn->tx_chn); +} + +static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common) +{ + struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; + int id; - rxq = &common->ports[port].xdp_rxq[id]; + /* shutdown tx channels */ + atomic_set(&common->tdown_cnt, common->tx_ch_num); + /* ensure new tdown_cnt value is visible */ + smp_mb__after_atomic(); + reinit_completion(&common->tdown_complete); + + for (id = 0; id < common->tx_ch_num; id++) + k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false); + + id = wait_for_completion_timeout(&common->tdown_complete, + msecs_to_jiffies(1000)); + if (!id) + dev_err(common->dev, "tx teardown timeout\n"); + + for (id = common->tx_ch_num - 1; id >= 0; id--) + am65_cpsw_destroy_txq(common, id); +} + +static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id) +{ + struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id]; + int ret; + + ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn); + if (ret) + return ret; - ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev, - id, flow->napi_rx.napi_id); - if (ret) - goto err; + napi_enable(&tx_chn->napi_tx); - ret = xdp_rxq_info_reg_mem_model(rxq, - MEM_TYPE_PAGE_POOL, - pool); - if (ret) - goto err; + return 0; +} + +static int am65_cpsw_create_txqs(struct am65_cpsw_common *common) +{ + int id, ret; + + for (id = 0; id < common->tx_ch_num; id++) { + ret = am65_cpsw_create_txq(common, id); + if (ret) { + dev_err(common->dev, "couldn't create txq %d: %d\n", + id, ret); + goto err; } } return 0; err: 
- am65_cpsw_destroy_xdp_rxqs(common); + for (--id; id >= 0; id--) + am65_cpsw_destroy_txq(common, id); + return ret; } @@ -642,7 +787,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false); } @@ -684,21 +828,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma) { struct am65_cpsw_tx_chn *tx_chn = data; + enum am65_cpsw_tx_buf_type buf_type; struct cppi5_host_desc_t *desc_tx; + struct xdp_frame *xdpf; struct sk_buff *skb; void **swdata; desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_tx); - skb = *(swdata); - am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); + buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); + if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { + skb = *(swdata); + dev_kfree_skb_any(skb); + } else { + xdpf = *(swdata); + xdp_return_frame(xdpf); + } - dev_kfree_skb_any(skb); + am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); } static struct sk_buff *am65_cpsw_build_skb(void *page_addr, struct net_device *ndev, - unsigned int len) + unsigned int len, + unsigned int headroom) { struct sk_buff *skb; @@ -708,7 +861,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr, if (unlikely(!skb)) return NULL; - skb_reserve(skb, AM65_CPSW_HEADROOM); + skb_reserve(skb, headroom); skb->dev = ndev; return skb; @@ -717,12 +870,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr, static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) { struct am65_cpsw_host *host_p = am65_common_get_host(common); - struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; - struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; - int port_idx, i, ret, tx, flow_idx; - struct am65_cpsw_rx_flow *flow; u32 val, port_mask; - struct page *page; + int port_idx, ret; if (common->usage_count) return 0; @@ -762,7 +911,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) ALE_DEFAULT_THREAD_ID, 0); cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_DEFAULT_THREAD_ENABLE, 1); - /* switch to vlan unaware mode */ + /* switch to vlan aware mode */ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1); cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); @@ -782,151 +931,38 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) am65_cpsw_qos_tx_p0_rate_init(common); - ret = am65_cpsw_create_xdp_rxqs(common); - if (ret) { - dev_err(common->dev, "Failed to create XDP rx queues\n"); + ret = am65_cpsw_create_rxqs(common); + if (ret) return ret; - } - - for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) { - flow = &rx_chn->flows[flow_idx]; - for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) { - page = page_pool_dev_alloc_pages(flow->page_pool); - if (!page) { - dev_err(common->dev, "cannot allocate page in flow %d\n", - flow_idx); - ret = -ENOMEM; - goto fail_rx; - } - ret = am65_cpsw_nuss_rx_push(common, page, flow_idx); - if (ret < 0) { - dev_err(common->dev, - "cannot submit page to rx channel flow %d, error %d\n", - flow_idx, ret); - am65_cpsw_put_page(flow, page, false); - goto fail_rx; - } - } - } - - ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn); - if (ret) { - dev_err(common->dev, "couldn't 
enable rx chn: %d\n", ret); - goto fail_rx; - } - - for (i = 0; i < common->rx_ch_num_flows ; i++) { - napi_enable(&rx_chn->flows[i].napi_rx); - if (rx_chn->flows[i].irq_disabled) { - rx_chn->flows[i].irq_disabled = false; - enable_irq(rx_chn->flows[i].irq); - } - } - - for (tx = 0; tx < common->tx_ch_num; tx++) { - ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn); - if (ret) { - dev_err(common->dev, "couldn't enable tx chn %d: %d\n", - tx, ret); - tx--; - goto fail_tx; - } - napi_enable(&tx_chn[tx].napi_tx); - } + ret = am65_cpsw_create_txqs(common); + if (ret) + goto cleanup_rx; dev_dbg(common->dev, "cpsw_nuss started\n"); return 0; -fail_tx: - while (tx >= 0) { - napi_disable(&tx_chn[tx].napi_tx); - k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn); - tx--; - } - - for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) { - flow = &rx_chn->flows[flow_idx]; - if (!flow->irq_disabled) { - disable_irq(flow->irq); - flow->irq_disabled = true; - } - napi_disable(&flow->napi_rx); - } - - k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); - -fail_rx: - for (i = 0; i < common->rx_ch_num_flows; i++) - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, - am65_cpsw_nuss_rx_cleanup, !!i); - - am65_cpsw_destroy_xdp_rxqs(common); +cleanup_rx: + am65_cpsw_destroy_rxqs(common); return ret; } static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) { - struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; - struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; - int i; - if (common->usage_count != 1) return 0; cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); - /* shutdown tx channels */ - atomic_set(&common->tdown_cnt, common->tx_ch_num); - /* ensure new tdown_cnt value is visible */ - smp_mb__after_atomic(); - reinit_completion(&common->tdown_complete); - - for (i = 0; i < common->tx_ch_num; i++) - k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false); - - i = wait_for_completion_timeout(&common->tdown_complete, - msecs_to_jiffies(1000)); - if (!i) - dev_err(common->dev, "tx timeout\n"); - for (i = 0; i < common->tx_ch_num; i++) { - napi_disable(&tx_chn[i].napi_tx); - hrtimer_cancel(&tx_chn[i].tx_hrtimer); - } - - for (i = 0; i < common->tx_ch_num; i++) { - k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i], - am65_cpsw_nuss_tx_cleanup); - k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn); - } - - reinit_completion(&common->tdown_complete); - k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true); - - if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) { - i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000)); - if (!i) - dev_err(common->dev, "rx teardown timeout\n"); - } - - for (i = common->rx_ch_num_flows - 1; i >= 0; i--) { - napi_disable(&rx_chn->flows[i].napi_rx); - hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer); - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, - am65_cpsw_nuss_rx_cleanup, !!i); - } - - k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); - + am65_cpsw_destroy_txqs(common); + am65_cpsw_destroy_rxqs(common); cpsw_ale_stop(common->ale); writel(0, common->cpsw_base + AM65_CPSW_REG_CTL); writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN); - am65_cpsw_destroy_xdp_rxqs(common); - dev_dbg(common->dev, "cpsw_nuss stopped\n"); return 0; } @@ -1014,6 +1050,15 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) common->usage_count++; + /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes. + * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port. 
+ */ + if (netdev_uses_dsa(ndev)) { + reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL); + reg &= ~AM65_CPSW_CTL_VLAN_AWARE; + writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL); + } + am65_cpsw_port_set_sl_mac(port, ndev->dev_addr); am65_cpsw_port_enable_dscp_map(port); @@ -1133,9 +1178,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, struct xdp_frame *xdpf; struct bpf_prog *prog; struct page *page; + int pkt_len; u32 act; int err; + pkt_len = *len; prog = READ_ONCE(port->xdp_prog); if (!prog) return AM65_CPSW_XDP_PASS; @@ -1153,8 +1200,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) + if (unlikely(!xdpf)) { + ndev->stats.tx_dropped++; goto drop; + } __netif_tx_lock(netif_txq, cpu); err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf, @@ -1163,14 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, if (err) goto drop; - dev_sw_netstats_tx_add(ndev, 1, *len); + dev_sw_netstats_rx_add(ndev, pkt_len); ret = AM65_CPSW_XDP_CONSUMED; goto out; case XDP_REDIRECT: if (unlikely(xdp_do_redirect(ndev, xdp, prog))) goto drop; - dev_sw_netstats_rx_add(ndev, *len); + dev_sw_netstats_rx_add(ndev, pkt_len); ret = AM65_CPSW_XDP_REDIRECT; goto out; default: @@ -1279,16 +1328,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info); dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - skb = am65_cpsw_build_skb(page_addr, ndev, - AM65_CPSW_MAX_PACKET_SIZE); - if (unlikely(!skb)) { - new_page = page; - goto requeue; - } - if (port->xdp_prog) { xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, @@ -1298,9 +1339,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, if (*xdp_state != AM65_CPSW_XDP_PASS) goto allocate; - /* Compute additional headroom to be reserved */ - headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb); - skb_reserve(skb, headroom); + headroom = xdp.data - xdp.data_hard_start; + } else { + headroom = AM65_CPSW_HEADROOM; + } + + skb = am65_cpsw_build_skb(page_addr, ndev, + AM65_CPSW_MAX_PACKET_SIZE, headroom); + if (unlikely(!skb)) { + new_page = page; + goto requeue; } ndev_priv = netdev_priv(ndev); @@ -2242,13 +2290,11 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) struct device *dev = common->dev; int i; - devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common); - common->tx_ch_rate_msk = 0; for (i = 0; i < common->tx_ch_num; i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - if (tx_chn->irq) + if (tx_chn->irq > 0) devm_free_irq(dev, tx_chn->irq, tx_chn); netif_napi_del(&tx_chn->napi_tx); @@ -2265,8 +2311,6 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) for (i = 0; i < common->tx_ch_num; i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, - am65_cpsw_nuss_tx_poll); hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback; @@ -2279,9 +2323,21 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) tx_chn->id, tx_chn->irq, ret); goto err; } + + netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, + am65_cpsw_nuss_tx_poll); } + return 0; 
+ err: + for (--i ; i >= 0 ; i--) { + struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; + + netif_napi_del(&tx_chn->napi_tx); + devm_free_irq(dev, tx_chn->irq, tx_chn); + } + return ret; } @@ -2362,12 +2418,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) goto err; } + return 0; + err: - i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common); - if (i) { - dev_err(dev, "Failed to add free_tx_chns action %d\n", i); - return i; - } + am65_cpsw_nuss_free_tx_chns(common); return ret; } @@ -2395,7 +2449,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common) rx_chn = &common->rx_chns; flows = rx_chn->flows; - devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common); for (i = 0; i < common->rx_ch_num_flows; i++) { if (!(flows[i].irq < 0)) @@ -2494,7 +2547,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) i, &rx_flow_cfg); if (ret) { dev_err(dev, "Failed to init rx flow%d %d\n", i, ret); - goto err; + goto err_flow; } if (!i) fdqring_id = @@ -2506,14 +2559,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) dev_err(dev, "Failed to get rx dma irq %d\n", flow->irq); ret = flow->irq; - goto err; + goto err_flow; } snprintf(flow->name, sizeof(flow->name), "%s-rx%d", dev_name(dev), i); - netif_napi_add(common->dma_ndev, &flow->napi_rx, - am65_cpsw_nuss_rx_poll); hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; @@ -2526,20 +2577,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) dev_err(dev, "failure requesting rx %d irq %u, %d\n", i, flow->irq, ret); flow->irq = -EINVAL; - goto err; + goto err_flow; } + + netif_napi_add(common->dma_ndev, &flow->napi_rx, + am65_cpsw_nuss_rx_poll); } /* setup classifier to route priorities to flows */ cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows); -err: - i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common); - if (i) { - dev_err(dev, "Failed to add free_rx_chns action %d\n", i); - return i; + return 0; + +err_flow: + for (--i; i >= 0 ; i--) { + flow = &rx_chn->flows[i]; + netif_napi_del(&flow->napi_rx); + devm_free_irq(dev, flow->irq, flow); } +err: + am65_cpsw_nuss_free_rx_chns(common); + return ret; } @@ -2559,20 +2618,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node, { u32 mac_lo, mac_hi, offset; struct regmap *syscon; - int ret; - syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse"); + syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse", + 1, &offset); if (IS_ERR(syscon)) { if (PTR_ERR(syscon) == -ENODEV) return 0; return PTR_ERR(syscon); } - ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1, - &offset); - if (ret) - return ret; - regmap_read(syscon, offset, &mac_lo); regmap_read(syscon, offset + 4, &mac_hi); @@ -3349,7 +3403,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) return ret; ret = am65_cpsw_nuss_init_rx_chns(common); if (ret) - return ret; + goto err_remove_tx; /* The DMA Channels are not guaranteed to be in a clean state. 
* Reset and disable them to ensure that they are back to the @@ -3370,7 +3424,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) ret = am65_cpsw_nuss_register_devlink(common); if (ret) - return ret; + goto err_remove_rx; for (i = 0; i < common->port_num; i++) { port = &common->ports[i]; @@ -3401,6 +3455,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) err_cleanup_ndev: am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_unregister_devlink(common); +err_remove_rx: + am65_cpsw_nuss_remove_rx_chns(common); +err_remove_tx: + am65_cpsw_nuss_remove_tx_chns(common); return ret; } @@ -3420,6 +3478,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, return ret; ret = am65_cpsw_nuss_init_rx_chns(common); + if (ret) + am65_cpsw_nuss_remove_tx_chns(common); return ret; } @@ -3551,7 +3611,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) init_completion(&common->tdown_complete); common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS; - common->pf_p0_rx_ptype_rrobin = false; + common->pf_p0_rx_ptype_rrobin = true; common->default_vlan = 1; common->ports = devm_kcalloc(dev, common->port_num, @@ -3678,6 +3738,8 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev) */ am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_unregister_devlink(common); + am65_cpsw_nuss_remove_rx_chns(common); + am65_cpsw_nuss_remove_tx_chns(common); am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); am65_cpsw_disable_serdes_phy(common); @@ -3739,8 +3801,10 @@ static int am65_cpsw_nuss_resume(struct device *dev) if (ret) return ret; ret = am65_cpsw_nuss_init_rx_chns(common); - if (ret) + if (ret) { + am65_cpsw_nuss_remove_tx_chns(common); return ret; + } /* If RX IRQ was disabled before suspend, keep it disabled */ for (i = 0; i < common->rx_ch_num_flows; i++) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4ef8cf6ea135..0cb6fa6e5b7d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -635,6 +635,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->phy = phy; + phy_disable_eee(slave->phy); + phy_attached_info(slave->phy); phy_start(slave->phy); @@ -684,7 +686,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) soft_reset("cpsw", &cpsw->regs->soft_reset); cpsw_ale_start(cpsw->ale); - /* switch to vlan unaware mode */ + /* switch to vlan aware mode */ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); control_reg = readl(&cpsw->regs->control); @@ -1225,7 +1227,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_link_ksettings = cpsw_get_link_ksettings, .set_link_ksettings = cpsw_set_link_ksettings, .get_eee = cpsw_get_eee, - .set_eee = cpsw_set_eee, .nway_reset = cpsw_nway_reset, .get_ringparam = cpsw_get_ringparam, .set_ringparam = cpsw_set_ringparam, diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 52e4e350b734..5cc72a91f220 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -127,15 +127,15 @@ struct cpsw_ale_dev_id { static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { - int idx, idx2; + int idx, idx2, index; u32 hi_val = 0; idx = start / 32; idx2 = (start + bits - 1) / 32; /* Check if bits to be fetched exceed a word */ if (idx != idx2) { - idx2 = 2 - idx2; /* flip */ - hi_val = ale_entry[idx2] << 
((idx2 * 32) - start); + index = 2 - idx2; /* flip */ + hi_val = ale_entry[index] << ((idx2 * 32) - start); } start -= idx * 32; idx = 2 - idx; /* flip */ @@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, u32 value) { - int idx, idx2; + int idx, idx2, index; value &= BITMASK(bits); idx = start / 32; idx2 = (start + bits - 1) / 32; /* Check if bits to be set exceed a word */ if (idx != idx2) { - idx2 = 2 - idx2; /* flip */ - ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); - ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); + index = 2 - idx2; /* flip */ + ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32))); + ale_entry[index] |= (value >> ((idx2 * 32) - start)); } start -= idx * 32; idx = 2 - idx; /* flip */ diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 21d55a180ef6..bdc4db0d169c 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata) return -EOPNOTSUPP; } -int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata); - else - return -EOPNOTSUPP; -} - int cpsw_nway_reset(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index a98bcc5eb566..cec0a90659d9 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -554,7 +554,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) soft_reset("cpsw", &cpsw->regs->soft_reset); cpsw_ale_start(cpsw->ale); - /* switch to vlan unaware mode */ + /* switch to vlan aware mode */ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); control_reg = readl(&cpsw->regs->control); @@ -778,6 +778,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->phy = phy; + phy_disable_eee(slave->phy); + phy_attached_info(slave->phy); phy_start(slave->phy); @@ -1209,7 +1211,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_link_ksettings = cpsw_get_link_ksettings, .set_link_ksettings = cpsw_set_link_ksettings, .get_eee = cpsw_get_eee, - .set_eee = cpsw_set_eee, .nway_reset = cpsw_nway_reset, .get_ringparam = cpsw_get_ringparam, .set_ringparam = cpsw_set_ringparam, diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 1f448290b9f4..f2fc55d9295d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -497,7 +497,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev, int cpsw_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *ecmd); int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata); -int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata); int cpsw_nway_reset(struct net_device *ndev); void cpsw_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 5d6d1cf78e93..768578c0d958 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ 
b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep) for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) { regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG, IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp)); + + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(cmp), 0); } /* enable reset counter on CMP0 event */ @@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep) } icss_iep_disable(iep); + if (iep->pps_enabled) + icss_iep_pps_enable(iep, false); + else if (iep->perout_enabled) + icss_iep_perout_enable(iep, NULL, false); + return 0; } EXPORT_SYMBOL_GPL(icss_iep_exit); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index fdebeb2f84e0..74f0f200a89d 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id) } EXPORT_SYMBOL_GPL(prueth_rx_irq); -void prueth_emac_stop(struct prueth_emac *emac) -{ - struct prueth *prueth = emac->prueth; - int slice; - - switch (emac->port_id) { - case PRUETH_PORT_MII0: - slice = ICSS_SLICE0; - break; - case PRUETH_PORT_MII1: - slice = ICSS_SLICE1; - break; - default: - netdev_err(emac->ndev, "invalid port\n"); - return; - } - - emac->fw_running = 0; - if (!emac->is_sr1) - rproc_shutdown(prueth->txpru[slice]); - rproc_shutdown(prueth->rtu[slice]); - rproc_shutdown(prueth->pru[slice]); -} -EXPORT_SYMBOL_GPL(prueth_emac_stop); - void prueth_cleanup_tx_ts(struct prueth_emac *emac) { int i; diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index 5d2491c2943a..ddfd1c02a885 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) return 0; } -static void icssg_init_emac_mode(struct prueth *prueth) +void icssg_init_emac_mode(struct prueth *prueth) { /* When the device is configured as a bridge and it is being brought * back to the emac mode, the host mac address has to be set as 0. 
@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth) int i; u8 mac[ETH_ALEN] = { 0 }; - if (prueth->emacs_initialized) - return; - /* Set VLAN TABLE address base */ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, addr << SMEM_VLAN_OFFSET); @@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth) /* Clear host MAC address */ icssg_class_set_host_mac_addr(prueth->miig_rt, mac); } +EXPORT_SYMBOL_GPL(icssg_init_emac_mode); -static void icssg_init_fw_offload_mode(struct prueth *prueth) +void icssg_init_fw_offload_mode(struct prueth *prueth) { u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; int i; - if (prueth->emacs_initialized) - return; - /* Set VLAN TABLE address base */ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, addr << SMEM_VLAN_OFFSET); @@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth) icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr); icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST); } +EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode); int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) { @@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) struct icssg_flow_cfg __iomem *flow_cfg; int ret; - if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) - icssg_init_fw_offload_mode(prueth); - else - icssg_init_emac_mode(prueth); - memset_io(config, 0, TAS_GATE_MASK_LIST0); icssg_miig_queues_init(prueth, slice); @@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port) writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET); } EXPORT_SYMBOL_GPL(icssg_set_pvid); + +int emac_fdb_flow_id_updated(struct prueth_emac *emac) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + int slice = prueth_emac_slice(emac); + struct mgmt_cmd fdb_cmd = { 0 }; + int ret; + + fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER; + fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW; + fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq); + fdb_cmd.param = 0; + + fdb_cmd.param |= (slice << 4); + fdb_cmd.cmd_args[0] = 0; + + ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum); + return fdb_cmd_rsp.status == 1 ? 
0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index 92c2deaa3068..c884e9fa099e 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -55,6 +55,7 @@ struct icssg_rxq_ctx { #define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03 #define ICSSG_FW_MGMT_CMD_TYPE 0x04 #define ICSSG_FW_MGMT_PKT 0x80000000 +#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05 struct icssg_r30_cmd { u32 cmd[4]; diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index c568c84a032b..00ed97860547 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = { } }; -static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) +static int prueth_start(struct rproc *rproc, const char *fw_name) +{ + int ret; + + ret = rproc_set_firmware(rproc, fw_name); + if (ret) + return ret; + return rproc_boot(rproc); +} + +static void prueth_shutdown(struct rproc *rproc) +{ + rproc_shutdown(rproc); +} + +static int prueth_emac_start(struct prueth *prueth) { struct icssg_firmwares *firmwares; struct device *dev = prueth->dev; - int slice, ret; + int ret, slice; if (prueth->is_switch_mode) firmwares = icssg_switch_firmwares; @@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) else firmwares = icssg_emac_firmwares; - slice = prueth_emac_slice(emac); - if (slice < 0) { - netdev_err(emac->ndev, "invalid port\n"); - return -EINVAL; + for (slice = 0; slice < PRUETH_NUM_MACS; slice++) { + ret = prueth_start(prueth->pru[slice], firmwares[slice].pru); + if (ret) { + dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret); + goto unwind_slices; + } + + ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu); + if (ret) { + dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret); + rproc_shutdown(prueth->pru[slice]); + goto unwind_slices; + } + + ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru); + if (ret) { + dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret); + rproc_shutdown(prueth->rtu[slice]); + rproc_shutdown(prueth->pru[slice]); + goto unwind_slices; + } } - ret = icssg_config(prueth, emac, slice); - if (ret) - return ret; + return 0; - ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru); - ret = rproc_boot(prueth->pru[slice]); - if (ret) { - dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret); - return -EINVAL; +unwind_slices: + while (--slice >= 0) { + prueth_shutdown(prueth->txpru[slice]); + prueth_shutdown(prueth->rtu[slice]); + prueth_shutdown(prueth->pru[slice]); } - ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu); - ret = rproc_boot(prueth->rtu[slice]); - if (ret) { - dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret); - goto halt_pru; + return ret; +} + +static void prueth_emac_stop(struct prueth *prueth) +{ + int slice; + + for (slice = 0; slice < PRUETH_NUM_MACS; slice++) { + prueth_shutdown(prueth->txpru[slice]); + prueth_shutdown(prueth->rtu[slice]); + prueth_shutdown(prueth->pru[slice]); + } +} + +static int prueth_emac_common_start(struct prueth *prueth) +{ + struct prueth_emac *emac; + int ret = 0; + int slice; + + if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1]) + return -EINVAL; + + /* clear SMEM and MSMC settings for all slices */ + 
memset_io(prueth->msmcram.va, 0, prueth->msmcram.size); + memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS); + + icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false); + icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false); + + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) + icssg_init_fw_offload_mode(prueth); + else + icssg_init_emac_mode(prueth); + + for (slice = 0; slice < PRUETH_NUM_MACS; slice++) { + emac = prueth->emac[slice]; + if (!emac) + continue; + ret = icssg_config(prueth, emac, slice); + if (ret) + goto disable_class; } - ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru); - ret = rproc_boot(prueth->txpru[slice]); + ret = prueth_emac_start(prueth); + if (ret) + goto disable_class; + + emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] : + prueth->emac[ICSS_SLICE1]; + ret = icss_iep_init(emac->iep, &prueth_iep_clockops, + emac, IEP_DEFAULT_CYCLE_TIME_NS); if (ret) { - dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret); - goto halt_rtu; + dev_err(prueth->dev, "Failed to initialize IEP module\n"); + goto stop_pruss; } - emac->fw_running = 1; return 0; -halt_rtu: - rproc_shutdown(prueth->rtu[slice]); +stop_pruss: + prueth_emac_stop(prueth); -halt_pru: - rproc_shutdown(prueth->pru[slice]); +disable_class: + icssg_class_disable(prueth->miig_rt, ICSS_SLICE0); + icssg_class_disable(prueth->miig_rt, ICSS_SLICE1); return ret; } +static int prueth_emac_common_stop(struct prueth *prueth) +{ + struct prueth_emac *emac; + + if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1]) + return -EINVAL; + + icssg_class_disable(prueth->miig_rt, ICSS_SLICE0); + icssg_class_disable(prueth->miig_rt, ICSS_SLICE1); + + prueth_emac_stop(prueth); + + emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] : + prueth->emac[ICSS_SLICE1]; + icss_iep_exit(emac->iep); + + return 0; +} + /* called back by PHY layer if there is change in link state of hw port*/ static void emac_adjust_link(struct net_device *ndev) { @@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns) u32 cycletime; int timeout; - if (!emac->fw_running) - return; - sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET; cycletime = IEP_DEFAULT_CYCLE_TIME_NS; @@ -472,61 +561,134 @@ const struct icss_iep_clockops prueth_iep_clockops = { static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) { - struct prueth_emac *emac = netdev_priv(ndev); - int port_mask = BIT(emac->port_id); + struct net_device *real_dev; + struct prueth_emac *emac; + int port_mask; + u8 vlan_id; + + vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC; + real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev; + emac = netdev_priv(real_dev); - port_mask |= icssg_fdb_lookup(emac, addr, 0); - icssg_fdb_add_del(emac, addr, 0, port_mask, true); - icssg_vtbl_modify(emac, 0, port_mask, port_mask, true); + port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id); + icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true); + icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true); return 0; } static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr) { - struct prueth_emac *emac = netdev_priv(ndev); - int port_mask = BIT(emac->port_id); + struct net_device *real_dev; + struct prueth_emac *emac; int other_port_mask; + int port_mask; + u8 vlan_id; + + vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC; + real_dev = is_vlan_dev(ndev) ? 
vlan_dev_real_dev(ndev) : ndev; + emac = netdev_priv(real_dev); - other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0); + port_mask = BIT(emac->port_id); + other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id); - icssg_fdb_add_del(emac, addr, 0, port_mask, false); - icssg_vtbl_modify(emac, 0, port_mask, port_mask, false); + icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false); + icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false); if (other_port_mask) { - icssg_fdb_add_del(emac, addr, 0, other_port_mask, true); - icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true); + icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true); + icssg_vtbl_modify(emac, vlan_id, other_port_mask, + other_port_mask, true); } return 0; } -static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) +static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac, + const u8 *addr, u8 vid, bool add) { - struct prueth_emac *emac = netdev_priv(ndev); - struct prueth *prueth = emac->prueth; - - icssg_fdb_add_del(emac, addr, prueth->default_vlan, + icssg_fdb_add_del(emac, addr, vid, ICSSG_FDB_ENTRY_P0_MEMBERSHIP | ICSSG_FDB_ENTRY_P1_MEMBERSHIP | ICSSG_FDB_ENTRY_P2_MEMBERSHIP | - ICSSG_FDB_ENTRY_BLOCK, true); + ICSSG_FDB_ENTRY_BLOCK, add); + + if (add) + icssg_vtbl_modify(emac, vid, BIT(emac->port_id), + BIT(emac->port_id), add); +} + +static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) +{ + struct net_device *real_dev; + struct prueth_emac *emac; + u8 vlan_id, i; + + vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR; + real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev; + + if (is_hsr_master(real_dev)) { + for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) { + emac = netdev_priv(hsr_get_port_ndev(real_dev, i)); + if (!emac) + return -EINVAL; + icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, + true); + } + } else { + emac = netdev_priv(real_dev); + icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true); + } - icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id), - BIT(emac->port_id), true); return 0; } static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr) { - struct prueth_emac *emac = netdev_priv(ndev); - struct prueth *prueth = emac->prueth; + struct net_device *real_dev; + struct prueth_emac *emac; + u8 vlan_id, i; + + vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR; + real_dev = is_vlan_dev(ndev) ? 
vlan_dev_real_dev(ndev) : ndev; + + if (is_hsr_master(real_dev)) { + for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) { + emac = netdev_priv(hsr_get_port_ndev(real_dev, i)); + if (!emac) + return -EINVAL; + icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, + false); + } + } else { + emac = netdev_priv(real_dev); + icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false); + } - icssg_fdb_add_del(emac, addr, prueth->default_vlan, - ICSSG_FDB_ENTRY_P0_MEMBERSHIP | - ICSSG_FDB_ENTRY_P1_MEMBERSHIP | - ICSSG_FDB_ENTRY_P2_MEMBERSHIP | - ICSSG_FDB_ENTRY_BLOCK, false); + return 0; +} + +static int icssg_update_vlan_mcast(struct net_device *vdev, int vid, + void *args) +{ + struct prueth_emac *emac = args; + + if (!vdev || !vid) + return 0; + + netif_addr_lock_bh(vdev); + __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc, + vdev->addr_len); + netif_addr_unlock_bh(vdev); + + if (emac->prueth->is_hsr_offload_mode) + __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev, + icssg_prueth_hsr_add_mcast, + icssg_prueth_hsr_del_mcast); + else + __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev, + icssg_prueth_add_mcast, + icssg_prueth_del_mcast); return 0; } @@ -543,23 +705,17 @@ static int emac_ndo_open(struct net_device *ndev) { struct prueth_emac *emac = netdev_priv(ndev); int ret, i, num_data_chn = emac->tx_ch_num; + struct icssg_flow_cfg __iomem *flow_cfg; struct prueth *prueth = emac->prueth; int slice = prueth_emac_slice(emac); struct device *dev = prueth->dev; int max_rx_flows; int rx_flow; - /* clear SMEM and MSMC settings for all slices */ - if (!prueth->emacs_initialized) { - memset_io(prueth->msmcram.va, 0, prueth->msmcram.size); - memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS); - } - /* set h/w MAC as user might have re-configured */ ether_addr_copy(emac->mac_addr, ndev->dev_addr); icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_class_default(prueth->miig_rt, slice, 0, false); icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); /* Notify the stack of the actual queue counts. 
 */
@@ -597,18 +753,23 @@ static int emac_ndo_open(struct net_device *ndev)
 goto cleanup_napi;
 }

- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ }

- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
+ goto stop;
 }
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
 ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
 IRQF_ONESHOT, dev_name(dev), emac);
 if (ret)
@@ -653,7 +814,8 @@ reset_rx_chn:
 free_tx_ts_irq:
 free_irq(emac->tx_ts_irq, emac);
 stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
 free_rx_irq:
 free_irq(emac->rx_chns.irq[rx_flow], emac);
 cleanup_napi:
@@ -689,8 +851,6 @@ static int emac_ndo_stop(struct net_device *ndev)
 if (ndev->phydev)
 phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
 if (emac->prueth->is_hsr_offload_mode)
 __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
 else
@@ -728,11 +888,9 @@ static int emac_ndo_stop(struct net_device *ndev)
 /* Destroying the queued work in ndo_stop() */
 cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
- /* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);

 free_irq(emac->tx_ts_irq, emac);
@@ -772,12 +930,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
 return;
 }

- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
 __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
 icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
 __dev_mc_sync(ndev, icssg_prueth_add_mcast,
 icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
 }

 /**
@@ -822,19 +990,19 @@ static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
 {
 struct prueth_emac *emac = netdev_priv(ndev);
 struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
 int untag_mask = 0;
- int port_mask;

- if (prueth->is_hsr_offload_mode) {
- port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
- untag_mask = 0;
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);

- netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
- icssg_set_pvid(emac->prueth, vid, emac->port_id);
- }
 return 0;
 }
@@ -843,18 +1011,16 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
 {
 struct prueth_emac *emac = netdev_priv(ndev);
 struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
 int untag_mask = 0;
- int port_mask;

- if (prueth->is_hsr_offload_mode) {
+ if (prueth->is_hsr_offload_mode)
 port_mask = BIT(PRUETH_PORT_HOST);
- untag_mask = 0;

- netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- }
 return 0;
 }
@@ -1053,10 +1219,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
 }
 }

-static void prueth_emac_restart(struct prueth *prueth)
+static int prueth_emac_restart(struct prueth *prueth)
 {
 struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
 struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+ int ret;

 /* Detach the net_device for both PRUeth ports*/
 if (netif_running(emac0->ndev))
@@ -1065,36 +1232,46 @@ static void prueth_emac_restart(struct prueth *prueth)
 netif_device_detach(emac1->ndev);

 /* Disable both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;

 /* Stop both pru cores for both PRUeth ports*/
- prueth_emac_stop(emac0);
- prueth->emacs_initialized--;
- prueth_emac_stop(emac1);
- prueth->emacs_initialized--;
+ ret = prueth_emac_common_stop(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to stop the firmwares");
+ return ret;
+ }

 /* Start both pru cores for both PRUeth ports */
- prueth_emac_start(prueth, emac0);
- prueth->emacs_initialized++;
- prueth_emac_start(prueth, emac1);
- prueth->emacs_initialized++;
+ ret = prueth_emac_common_start(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to start the firmwares");
+ return ret;
+ }

 /* Enable forwarding for both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

 /* Attache net_device for both PRUeth ports */
 netif_device_attach(emac0->ndev);
 netif_device_attach(emac1->ndev);
+
+ return ret;
 }

 static void icssg_change_mode(struct prueth *prueth)
 {
 struct prueth_emac *emac;
- int mac;
+ int mac, ret;

- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }

 for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
 emac = prueth->emac[mac];
@@ -1158,7 +1335,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
 if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
 prueth->br_members & BIT(PRUETH_PORT_MII1)) {
 prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
 emac->port_vlan = prueth->default_vlan;
 icssg_change_mode(prueth);
 }
@@ -1173,13 +1350,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
 {
 struct prueth_emac *emac = netdev_priv(ndev);
 struct prueth *prueth = emac->prueth;
+ int ret;

 prueth->br_members &= ~BIT(emac->port_id);

 if (prueth->is_switch_mode) {
 prueth->is_switch_mode = false;
 emac->port_vlan = 0;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
 }

 prueth_offload_fwd_mark_update(prueth);
@@ -1211,7 +1393,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
 NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
 return -EOPNOTSUPP;
 prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
 emac0->port_vlan = prueth->default_vlan;
 emac1->port_vlan = prueth->default_vlan;
 icssg_change_mode(prueth);
@@ -1228,6 +1410,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
 struct prueth *prueth = emac->prueth;
 struct prueth_emac *emac0;
 struct prueth_emac *emac1;
+ int ret;

 emac0 = prueth->emac[PRUETH_MAC0];
 emac1 = prueth->emac[PRUETH_MAC1];
@@ -1238,7 +1421,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
 emac0->port_vlan = 0;
 emac1->port_vlan = 0;
 prueth->hsr_dev = NULL;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
 netdev_dbg(ndev, "Disabling HSR Offload mode\n");
 }
 }
@@ -1413,13 +1600,10 @@ static int prueth_probe(struct platform_device *pdev)
 prueth->pa_stats = NULL;
 }

- if (eth0_node) {
+ if (eth0_node || eth1_node) {
 ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
 if (ret)
 goto put_cores;
- }
-
- if (eth1_node) {
 ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
 if (ret)
 goto put_cores;
@@ -1618,14 +1802,12 @@ put_pruss:
 pruss_put(prueth->pruss);

 put_cores:
- if (eth1_node) {
- prueth_put_cores(prueth, ICSS_SLICE1);
- of_node_put(eth1_node);
- }
-
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
 prueth_put_cores(prueth, ICSS_SLICE0);
 of_node_put(eth0_node);
+
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
 }

 return ret;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f5c1d473e9f9..329b46e9ee53 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -83,6 +83,12 @@
 #define ICSS_CMD_ADD_FILTER 0x7
 #define ICSS_CMD_ADD_MAC 0x8

+/* VLAN Filtering Related MACROs */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
 /* In switch mode there are 3 real ports i.e. 3 mac addrs.
 * however Linux sees only the host side port. The other 2 ports
 * are the switch ports.
@@ -140,7 +146,6 @@ struct prueth_rx_chn {
 /* data for each emac port */
 struct prueth_emac {
 bool is_sr1;
- bool fw_running;
 struct prueth *prueth;
 struct net_device *ndev;
 u8 mac_addr[6];
@@ -201,6 +206,8 @@ struct prueth_emac {
 /* RX IRQ Coalescing Related */
 struct hrtimer rx_hrtimer;
 unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
 };

 /**
@@ -361,6 +368,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
 enum icssg_port_state_cmd state);
 void icssg_config_set_speed(struct prueth_emac *emac);
 void icssg_config_half_duplex(struct prueth_emac *emac);
+void icssg_init_emac_mode(struct prueth *prueth);
+void icssg_init_fw_offload_mode(struct prueth *prueth);

 /* Buffer queue helpers */
 int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -377,6 +386,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
 u8 untag_mask, bool add);
 u16 icssg_get_pvid(struct prueth_emac *emac);
 void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+int emac_fdb_flow_id_updated(struct prueth_emac *emac);

 #define prueth_napi_to_tx_chn(pnapi) \
 container_of(pnapi, struct prueth_tx_chn, napi_tx)
@@ -407,7 +417,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
 struct sk_buff *skb, u32 *psdata);
 enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 irqreturn_t prueth_rx_irq(int irq, void *dev_id);
-void prueth_emac_stop(struct prueth_emac *emac);
 void prueth_cleanup_tx_ts(struct prueth_emac *emac);
 int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
 int prueth_prepare_rx_chan(struct prueth_emac *emac,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 5024f0647a0d..64a19ff39562 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
 goto halt_pru;
 }

- emac->fw_running = 1;
 return 0;

 halt_pru:
@@ -449,6 +448,29 @@ halt_pru:
 return ret;
 }

+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
 /**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
@@ -1009,8 +1031,6 @@ static int prueth_probe(struct platform_device *pdev)
 (unsigned long)prueth->msmcram.va);
 prueth->msmcram.size = msmc_ram_size;
 memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);

 prueth->iep0 = icss_iep_get_idx(np, 0);
 if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 894911f3d560..e56ebbdd428d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1568,7 +1568,7 @@ static void init_registers(struct net_device *dev)
 if (rp->quirks & rqMgmt)
 rhine_init_cam_filter(dev);

- napi_enable(&rp->napi);
+ napi_enable_locked(&rp->napi);

 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

@@ -1696,7 +1696,10 @@ static int rhine_open(struct net_device *dev)
 rhine_power_init(dev);
 rhine_chip_reset(dev);
 rhine_task_enable(rp);
+
+ netdev_lock(dev);
 init_registers(dev);
+ netdev_unlock(dev);

 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
 __func__, ioread16(ioaddr + ChipCmd),
@@ -1727,6 +1730,8 @@ static void rhine_reset_task(struct work_struct *work)
 napi_disable(&rp->napi);
 netif_tx_disable(dev);
+
+ netdev_lock(dev);
 spin_lock_bh(&rp->lock);

 /* clear all descriptors */
@@ -1740,6 +1745,7 @@ static void rhine_reset_task(struct work_struct *work)
 init_registers(dev);

 spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);

 netif_trans_update(dev); /* prevent tx timeout */
 dev->stats.tx_errors++;
@@ -2541,9 +2547,12 @@ static int rhine_resume(struct device *device)
 alloc_tbufs(dev);
 rhine_reset_rbufs(rp);
 rhine_task_enable(rp);
+
+ netdev_lock(dev);
 spin_lock_bh(&rp->lock);
 init_registers(dev);
 spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);

 netif_device_attach(dev);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 if (ret < 0)
 goto out_free_tmp_vptr_1;

- napi_disable(&vptr->napi);
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);

 spin_lock_irqsave(&vptr->lock, flags);

@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 velocity_give_many_rx_descs(vptr);

- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
 mac_enable_int(vptr->mac_regs);
 netif_start_queue(dev);

 spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);

 velocity_free_rings(tmp_vptr);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1bf9c38e4125..deaf670c160e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
 status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
 timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

+ buf[0] = rd32(wx, WX_MNG_MBOX);
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+ status = -EINVAL;
+ goto rel_out;
+ }
+
 /* Check command completion */
 if (status) {
- wx_dbg(wx, "Command has failed with no status valid.\n");
-
- buf[0] = rd32(wx, WX_MNG_MBOX);
- if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
- status = -EINVAL;
- goto rel_out;
- }
- if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wx, "It's unknown cmd.\n");
- status = -EINVAL;
- goto rel_out;
- }
-
+ wx_err(wx, "Command has failed with no status valid.\n");
 wx_dbg(wx, "write value:\n");
 for (i = 0; i < dword_len; i++)
 wx_dbg(wx, "%x ", buffer[i]);
 wx_dbg(wx, "read value:\n");
 for (i = 0; i < dword_len; i++)
 wx_dbg(wx, "%x ", buf[i]);
+ wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
+
+ goto rel_out;
 }

 if (!return_data)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d64b8abcf018..a3f4f3e42587 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -120,6 +120,9 @@
 #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
 #define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */

+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE (125ULL * USEC_PER_SEC)
+
 /* Default TX/RX Threshold and delay timer values for SGDMA mode */
 #define XAXIDMA_DFT_TX_THRESHOLD 24
 #define XAXIDMA_DFT_TX_USEC 50
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..f33178f90c42 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,11 +238,8 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
-
- return result;
+ XAXIDMA_DELAY_SCALE);
+ return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
 }

 /**
@@ -2056,14 +2053,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
 return -EBUSY;
 }

- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
+ NL_SET_ERR_MSG(extack,
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
+ lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

 return 0;
 }
@@ -2331,11 +2345,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
 }

 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
 struct phylink_link_state *state)
 {
 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
 }

 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2882,6 +2897,7 @@ static int axienet_probe(struct platform_device *pdev)
 lp->phylink_config.dev = &ndev->dev;
 lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
 MAC_10FD | MAC_100FD | MAC_1000FD;
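The axienet coalesce hunk above tightens the accepted ethtool parameters: both frame counts must lie in 1..255, and a non-zero usecs value is required whenever the corresponding frame count is greater than one. A minimal stand-alone sketch of those bounds follows (plain C; the struct and helper names are illustrative only and are not part of the driver):

/* Illustrative re-statement of the coalesce bounds enforced above;
 * names are hypothetical and not taken from the kernel sources. */
#include <stdbool.h>
#include <stdio.h>

struct coalesce_params {
	unsigned int rx_max_frames;
	unsigned int rx_usecs;
	unsigned int tx_max_frames;
	unsigned int tx_usecs;
};

static bool coalesce_params_valid(const struct coalesce_params *p)
{
	/* frames must be less than 256 */
	if (p->rx_max_frames > 255 || p->tx_max_frames > 255)
		return false;
	/* frames must be non-zero */
	if (!p->rx_max_frames || !p->tx_max_frames)
		return false;
	/* usecs must be non-zero when frames is greater than one */
	if ((p->rx_max_frames > 1 && !p->rx_usecs) ||
	    (p->tx_max_frames > 1 && !p->tx_usecs))
		return false;
	return true;
}

int main(void)
{
	struct coalesce_params ok = { 24, 50, 24, 50 };
	struct coalesce_params bad = { 2, 0, 24, 50 };

	printf("ok:  %d\n", coalesce_params_valid(&ok));
	printf("bad: %d\n", coalesce_params_valid(&bad));
	return 0;
}

Compiled with any C compiler, the first parameter set passes while the second is rejected, since its rx frame count is greater than one with an rx usecs value of zero.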