From c9011b028e956c3b6baa6f131d9eec43e4e52020 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 2 Feb 2023 17:56:19 +0530 Subject: net: macb: Perform zynqmp dynamic configuration only for SGMII interface In zynqmp platforms where firmware supports dynamic SGMII configuration but has other non-SGMII ethernet devices, it fails them with no packets received at the RX interface. To fix this behaviour perform SGMII dynamic configuration only for the SGMII phy interface. Fixes: 32cee7818111 ("net: macb: Add zynqmp SGMII dynamic configuration support") Signed-off-by: Radhey Shyam Pandey Reviewed-by: Jiri Pirko Reported-by: Michal Simek Tested-by: Michal Simek Reviewed-by: Claudiu Beznea Link: https://lore.kernel.org/r/1675340779-27499-1-git-send-email-radhey.shyam.pandey@amd.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb_main.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 72e42820713d..6cda31520c42 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "failed to init SGMII PHY\n"); - } - ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG); - if (!ret) { - u32 pm_info[2]; + ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG); + if (!ret) { + u32 pm_info[2]; + + ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains", + pm_info, ARRAY_SIZE(pm_info)); + if (ret) { + dev_err(&pdev->dev, "Failed to read power management information\n"); + goto err_out_phy_exit; + } + ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0); + if (ret) + goto err_out_phy_exit; - ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains", - pm_info, ARRAY_SIZE(pm_info)); - if (ret) { - dev_err(&pdev->dev, "Failed to read power management information\n"); - goto err_out_phy_exit; + ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1); + if (ret) + goto err_out_phy_exit; } - ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0); - if (ret) - goto err_out_phy_exit; - ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1); - if (ret) - goto err_out_phy_exit; } /* Fully reset controller at hardware level if mapped in device tree */ -- cgit v1.2.3 From e8797a058466b60fc5a3291b92430c93ba90eaff Mon Sep 17 00:00:00 2001 From: Neel Patel Date: Thu, 2 Feb 2023 13:55:35 -0800 Subject: ionic: clean interrupt before enabling queue to avoid credit race Clear the interrupt credits before enabling the queue rather than after to be sure that the enabled queue starts at 0 and that we don't wipe away possible credits after enabling the queue. 
Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling") Signed-off-by: Neel Patel Signed-off-by: Shannon Nelson Reviewed-by: Leon Romanovsky Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 4dd16c487f2b..5e2dfa79f0e4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -269,6 +269,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) .oper = IONIC_Q_ENABLE, }, }; + int ret; idev = &lif->ionic->idev; dev = lif->ionic->dev; @@ -276,16 +277,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", ctx.cmd.q_control.index, ctx.cmd.q_control.type); + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + + ret = ionic_adminq_post_wait(lif, &ctx); + if (ret) + return ret; + + if (qcq->napi.poll) + napi_enable(&qcq->napi); + if (qcq->flags & IONIC_QCQ_F_INTR) { irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); - napi_enable(&qcq->napi); - ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } - return ionic_adminq_post_wait(lif, &ctx); + return 0; } static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err) -- cgit v1.2.3 From 1fffb0254178de1d4d9e67f467a3460d10680b10 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 2 Feb 2023 13:55:36 -0800 Subject: ionic: clear up notifyq alloc commentary Make sure the q+cq alloc for NotifyQ is clearly documented and don't bother with unnecessary local variables. Signed-off-by: Shannon Nelson Reviewed-by: Leon Romanovsky Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 5e2dfa79f0e4..e51d8be7911c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -573,13 +573,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, } if (flags & IONIC_QCQ_F_NOTIFYQ) { - int q_size, cq_size; + int q_size; - /* q & cq need to be contiguous in case of notifyq */ + /* q & cq need to be contiguous in NotifyQ, so alloc it all in q + * and don't alloc qc. We leave new->qc_size and new->qc_base + * as 0 to be sure we don't try to free it later. + */ q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); - cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE); - - new->q_size = PAGE_SIZE + q_size + cq_size; + new->q_size = PAGE_SIZE + q_size + + ALIGN(num_descs * cq_desc_size, PAGE_SIZE); new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, GFP_KERNEL); if (!new->q_base) { -- cgit v1.2.3 From b69585bfceceeffda940906cabfdaee4b47bde92 Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Thu, 2 Feb 2023 13:55:37 -0800 Subject: ionic: missed doorbell workaround In one version of the HW there is a remote possibility that it will miss the doorbell ring. This adds a bit of protection to be sure we don't stall a queue from a missed doorbell. 
Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling") Signed-off-by: Allen Hubbe Signed-off-by: Shannon Nelson Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 9 ++- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 12 ++++ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 41 ++++++++++- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 2 + drivers/net/ethernet/pensando/ionic/ionic_main.c | 29 ++++++++ drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 87 +++++++++++++++++++++++- 6 files changed, 176 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 626b9113e7c4..d911f4fd9af6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, q->lif->index, q->name, q->hw_type, q->hw_index, q->head_idx, ring_doorbell); - if (ring_doorbell) + if (ring_doorbell) { ionic_dbell_ring(lif->kern_dbpage, q->hw_type, q->dbval | q->head_idx); + + q->dbell_jiffies = jiffies; + + if (q_to_qcq(q)->napi_qcq) + mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); + } } static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 2a1d7b9c07e7..bce3ca38669b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -25,6 +25,12 @@ #define IONIC_DEV_INFO_REG_COUNT 32 #define IONIC_DEV_CMD_REG_COUNT 32 +#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */ +#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */ +#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ +#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ +#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */ + struct ionic_dev_bar { void __iomem *vaddr; phys_addr_t bus_addr; @@ -216,6 +222,8 @@ struct ionic_queue { struct ionic_lif *lif; struct ionic_desc_info *info; u64 dbval; + unsigned long dbell_deadline; + unsigned long dbell_jiffies; u16 head_idx; u16 tail_idx; unsigned int index; @@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, int ionic_heartbeat_check(struct ionic *ionic); bool ionic_is_fw_running(struct ionic_dev *idev); +bool ionic_adminq_poke_doorbell(struct ionic_queue *q); +bool ionic_txq_poke_doorbell(struct ionic_queue *q); +bool ionic_rxq_poke_doorbell(struct ionic_queue *q); + #endif /* _IONIC_DEV_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index e51d8be7911c..63a78a9ac241 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -16,6 +16,7 @@ #include "ionic.h" #include "ionic_bus.h" +#include "ionic_dev.h" #include "ionic_lif.h" #include "ionic_txrx.h" #include "ionic_ethtool.h" @@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) } } +static void ionic_napi_deadline(struct timer_list *timer) +{ + struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline); + + napi_schedule(&qcq->napi); +} + static irqreturn_t ionic_isr(int irq, void *data) { struct napi_struct *napi = data; @@ -325,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct 
ionic_qcq *qcq, int f synchronize_irq(qcq->intr.vector); irq_set_affinity_hint(qcq->intr.vector, NULL); napi_disable(&qcq->napi); + del_timer_sync(&qcq->napi_deadline); } /* If there was a previous fw communcation error, don't bother with @@ -460,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, n_qcq->intr.vector = src_qcq->intr.vector; n_qcq->intr.index = src_qcq->intr.index; + n_qcq->napi_qcq = src_qcq->napi_qcq; } static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) @@ -784,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); - if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi); + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + } qcq->flags |= IONIC_QCQ_F_INITED; @@ -839,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); + q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi); else netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + qcq->flags |= IONIC_QCQ_F_INITED; return 0; @@ -1161,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) struct ionic_dev *idev = &lif->ionic->idev; unsigned long irqflags; unsigned int flags = 0; + bool resched = false; int rx_work = 0; int tx_work = 0; int n_work = 0; @@ -1198,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); } + if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q)) + resched = true; + if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q)) + resched = true; + if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q)) + resched = true; + if (resched) + mod_timer(&lif->adminqcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); + return work_done; } @@ -3256,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif) dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); + q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); + qcq->napi_qcq = qcq; + timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); + napi_enable(&qcq->napi); if (qcq->flags & IONIC_QCQ_F_INTR) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index a53984bf3544..734519895614 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -74,8 +74,10 @@ struct ionic_qcq { struct ionic_queue q; struct ionic_cq cq; struct ionic_intr_info intr; + struct timer_list napi_deadline; struct napi_struct napi; unsigned int flags; + struct ionic_qcq *napi_qcq; struct dentry *dentry; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c 
b/drivers/net/ethernet/pensando/ionic/ionic_main.c index a13530ec4dd8..08c42b039d92 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q, complete_all(&ctx->work); } +bool ionic_adminq_poke_doorbell(struct ionic_queue *q) +{ + struct ionic_lif *lif = q->lif; + unsigned long now, then, dif; + unsigned long irqflags; + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + + if (q->tail_idx == q->head_idx) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return false; + } + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + } + + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + + return true; +} + int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { struct ionic_desc_info *desc_info; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 0c3977416cd1..f761780f0162 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, ionic_q_post(q, ring_dbell, cb_func, cb_arg); } +bool ionic_txq_poke_doorbell(struct ionic_queue *q) +{ + unsigned long now, then, dif; + struct netdev_queue *netdev_txq; + struct net_device *netdev; + + netdev = q->lif->netdev; + netdev_txq = netdev_get_tx_queue(netdev, q->index); + + HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id()); + + if (q->tail_idx == q->head_idx) { + HARD_TX_UNLOCK(netdev, netdev_txq); + return false; + } + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + } + + HARD_TX_UNLOCK(netdev, netdev_txq); + + return true; +} + +bool ionic_rxq_poke_doorbell(struct ionic_queue *q) +{ + unsigned long now, then, dif; + + /* no lock, called from rx napi or txrx napi, nothing else can fill */ + + if (q->tail_idx == q->head_idx) + return false; + + now = READ_ONCE(jiffies); + then = q->dbell_jiffies; + dif = now - then; + + if (dif > q->dbell_deadline) { + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head_idx); + + q->dbell_jiffies = now; + + dif = 2 * q->dbell_deadline; + if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE) + dif = IONIC_RX_MAX_DOORBELL_DEADLINE; + + q->dbell_deadline = dif; + } + + return true; +} + static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) { return netdev_get_tx_queue(q->lif->netdev, q->index); @@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q) ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, q->dbval | q->head_idx); + + q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; + q->dbell_jiffies = jiffies; + + mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, + jiffies + IONIC_NAPI_DEADLINE); } void ionic_rx_empty(struct ionic_queue *q) @@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done, flags); } + if (!work_done && ionic_txq_poke_doorbell(&qcq->q)) + mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + return work_done; } @@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done, flags); } + if (!work_done && 
ionic_rxq_poke_doorbell(&qcq->q)) + mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + return work_done; } int ionic_txrx_napi(struct napi_struct *napi, int budget) { - struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_qcq *rxqcq = napi_to_qcq(napi); struct ionic_cq *rxcq = napi_to_cq(napi); unsigned int qi = rxcq->bound_q->index; + struct ionic_qcq *txqcq; struct ionic_dev *idev; struct ionic_lif *lif; struct ionic_cq *txcq; + bool resched = false; u32 rx_work_done = 0; u32 tx_work_done = 0; u32 flags = 0; lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; + txqcq = lif->txqcqs[qi]; txcq = &lif->txqcqs[qi]->cq; tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, @@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ionic_rx_fill(rxcq->bound_q); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { - ionic_dim_update(qcq, 0); + ionic_dim_update(rxqcq, 0); flags |= IONIC_INTR_CRED_UNMASK; rxcq->bound_intr->rearm_count++; } @@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done + rx_work_done, flags); } + if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q)) + resched = true; + if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q)) + resched = true; + if (resched) + mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + return rx_work_done; } -- cgit v1.2.3 From d7d94b2612f5dc25d61dc7bf58aafe7b31f40191 Mon Sep 17 00:00:00 2001 From: Casper Andersson Date: Fri, 3 Feb 2023 09:55:57 +0100 Subject: net: microchip: sparx5: fix PTP init/deinit not checking all ports Check all ports instead of just port_count ports. PTP init was only checking ports 0 to port_count. If the hardware ports are not mapped starting from 0 then they would be missed, e.g. if only ports 20-30 were mapped it would attempt to init ports 0-10, resulting in NULL pointers when attempting to timestamp. Now it will init all mapped ports. Fixes: 70dfe25cd866 ("net: sparx5: Update extraction/injection for timestamping") Signed-off-by: Casper Andersson Reviewed-by: Horatiu Vultur Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c index 0ed1ea7727c5..69e76634f9aa 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5) /* Enable master counters */ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG); - for (i = 0; i < sparx5->port_count; i++) { + for (i = 0; i < SPX5_PORTS; i++) { port = sparx5->ports[i]; if (!port) continue; @@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5) struct sparx5_port *port; int i; - for (i = 0; i < sparx5->port_count; i++) { + for (i = 0; i < SPX5_PORTS; i++) { port = sparx5->ports[i]; if (!port) continue; -- cgit v1.2.3 From 4d159f7884f78b1aacb99b4fc37d1e3cb1194e39 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Mon, 30 Jan 2023 14:06:40 -0800 Subject: ice: Do not use WQ_MEM_RECLAIM flag for workqueue When both ice and the irdma driver are loaded, a warning in check_flush_dependency is being triggered. This is due to ice driver workqueue being allocated with the WQ_MEM_RECLAIM flag and the irdma one is not. 
According to kernel documentation, this flag should be set if the workqueue will be involved in the kernel's memory reclamation flow. Since it is not, there is no need for the ice driver's WQ to have this flag set so remove it. Example trace: [ +0.000004] workqueue: WQ_MEM_RECLAIM ice:ice_service_task [ice] is flushing !WQ_MEM_RECLAIM infiniband:0x0 [ +0.000139] WARNING: CPU: 0 PID: 728 at kernel/workqueue.c:2632 check_flush_dependency+0x178/0x1a0 [ +0.000011] Modules linked in: bonding tls xt_CHECKSUM xt_MASQUERADE xt_conntrack ipt_REJECT nf_reject_ipv4 nft_compat nft_cha in_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nf_tables nfnetlink bridge stp llc rfkill vfat fat intel_rapl_msr intel _rapl_common isst_if_common skx_edac nfit libnvdimm x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass crct1 0dif_pclmul crc32_pclmul ghash_clmulni_intel rapl intel_cstate rpcrdma sunrpc rdma_ucm ib_srpt ib_isert iscsi_target_mod target_ core_mod ib_iser libiscsi scsi_transport_iscsi rdma_cm ib_cm iw_cm iTCO_wdt iTCO_vendor_support ipmi_ssif irdma mei_me ib_uverbs ib_core intel_uncore joydev pcspkr i2c_i801 acpi_ipmi mei lpc_ich i2c_smbus intel_pch_thermal ioatdma ipmi_si acpi_power_meter acpi_pad xfs libcrc32c sd_mod t10_pi crc64_rocksoft crc64 sg ahci ixgbe libahci ice i40e igb crc32c_intel mdio i2c_algo_bit liba ta dca wmi dm_mirror dm_region_hash dm_log dm_mod ipmi_devintf ipmi_msghandler fuse [ +0.000161] [last unloaded: bonding] [ +0.000006] CPU: 0 PID: 728 Comm: kworker/0:2 Tainted: G S 6.2.0-rc2_next-queue-13jan-00458-gc20aabd57164 #1 [ +0.000006] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0010.010620200716 01/06/2020 [ +0.000003] Workqueue: ice ice_service_task [ice] [ +0.000127] RIP: 0010:check_flush_dependency+0x178/0x1a0 [ +0.000005] Code: 89 8e 02 01 e8 49 3d 40 00 49 8b 55 18 48 8d 8d d0 00 00 00 48 8d b3 d0 00 00 00 4d 89 e0 48 c7 c7 e0 3b 08 9f e8 bb d3 07 01 <0f> 0b e9 be fe ff ff 80 3d 24 89 8e 02 00 0f 85 6b ff ff ff e9 06 [ +0.000004] RSP: 0018:ffff88810a39f990 EFLAGS: 00010282 [ +0.000005] RAX: 0000000000000000 RBX: ffff888141bc2400 RCX: 0000000000000000 [ +0.000004] RDX: 0000000000000001 RSI: dffffc0000000000 RDI: ffffffffa1213a80 [ +0.000003] RBP: ffff888194bf3400 R08: ffffed117b306112 R09: ffffed117b306112 [ +0.000003] R10: ffff888bd983088b R11: ffffed117b306111 R12: 0000000000000000 [ +0.000003] R13: ffff888111f84d00 R14: ffff88810a3943ac R15: ffff888194bf3400 [ +0.000004] FS: 0000000000000000(0000) GS:ffff888bd9800000(0000) knlGS:0000000000000000 [ +0.000003] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ +0.000003] CR2: 000056035b208b60 CR3: 000000017795e005 CR4: 00000000007706f0 [ +0.000003] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ +0.000003] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ +0.000002] PKRU: 55555554 [ +0.000003] Call Trace: [ +0.000002] [ +0.000003] __flush_workqueue+0x203/0x840 [ +0.000006] ? mutex_unlock+0x84/0xd0 [ +0.000008] ? __pfx_mutex_unlock+0x10/0x10 [ +0.000004] ? __pfx___flush_workqueue+0x10/0x10 [ +0.000006] ? mutex_lock+0xa3/0xf0 [ +0.000005] ib_cache_cleanup_one+0x39/0x190 [ib_core] [ +0.000174] __ib_unregister_device+0x84/0xf0 [ib_core] [ +0.000094] ib_unregister_device+0x25/0x30 [ib_core] [ +0.000093] irdma_ib_unregister_device+0x97/0xc0 [irdma] [ +0.000064] ? __pfx_irdma_ib_unregister_device+0x10/0x10 [irdma] [ +0.000059] ? 
up_write+0x5c/0x90 [ +0.000005] irdma_remove+0x36/0x90 [irdma] [ +0.000062] auxiliary_bus_remove+0x32/0x50 [ +0.000007] device_release_driver_internal+0xfa/0x1c0 [ +0.000005] bus_remove_device+0x18a/0x260 [ +0.000007] device_del+0x2e5/0x650 [ +0.000005] ? __pfx_device_del+0x10/0x10 [ +0.000003] ? mutex_unlock+0x84/0xd0 [ +0.000004] ? __pfx_mutex_unlock+0x10/0x10 [ +0.000004] ? _raw_spin_unlock+0x18/0x40 [ +0.000005] ice_unplug_aux_dev+0x52/0x70 [ice] [ +0.000160] ice_service_task+0x1309/0x14f0 [ice] [ +0.000134] ? __pfx___schedule+0x10/0x10 [ +0.000006] process_one_work+0x3b1/0x6c0 [ +0.000008] worker_thread+0x69/0x670 [ +0.000005] ? __kthread_parkme+0xec/0x110 [ +0.000007] ? __pfx_worker_thread+0x10/0x10 [ +0.000005] kthread+0x17f/0x1b0 [ +0.000005] ? __pfx_kthread+0x10/0x10 [ +0.000004] ret_from_fork+0x29/0x50 [ +0.000009] Fixes: 940b61af02f4 ("ice: Initialize PF and setup miscellaneous interrupt") Signed-off-by: Anirudh Venkataramanan Signed-off-by: Marcin Szycik Tested-by: Jakub Andrysiak Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky --- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5f86e4111fa9..b288a01a321a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5541,7 +5541,7 @@ static int __init ice_module_init(void) pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; -- cgit v1.2.3 From b2dbde3ad44f290ded319f00b71dccec90083a50 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Wed, 4 Jan 2023 09:25:17 +0100 Subject: ice: fix out-of-bounds KASAN warning in virtchnl KASAN reported: [ 9793.708867] BUG: KASAN: global-out-of-bounds in ice_get_link_speed+0x16/0x30 [ice] [ 9793.709205] Read of size 4 at addr ffffffffc1271b1c by task kworker/6:1/402 [ 9793.709222] CPU: 6 PID: 402 Comm: kworker/6:1 Kdump: loaded Tainted: G B OE 6.1.0+ #3 [ 9793.709235] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.00.01.0014.070920180847 07/09/2018 [ 9793.709245] Workqueue: ice ice_service_task [ice] [ 9793.709575] Call Trace: [ 9793.709582] [ 9793.709588] dump_stack_lvl+0x44/0x5c [ 9793.709613] print_report+0x17f/0x47b [ 9793.709632] ? __cpuidle_text_end+0x5/0x5 [ 9793.709653] ? ice_get_link_speed+0x16/0x30 [ice] [ 9793.709986] ? ice_get_link_speed+0x16/0x30 [ice] [ 9793.710317] kasan_report+0xb7/0x140 [ 9793.710335] ? ice_get_link_speed+0x16/0x30 [ice] [ 9793.710673] ice_get_link_speed+0x16/0x30 [ice] [ 9793.711006] ice_vc_notify_vf_link_state+0x14c/0x160 [ice] [ 9793.711351] ? ice_vc_repr_cfg_promiscuous_mode+0x120/0x120 [ice] [ 9793.711698] ice_vc_process_vf_msg+0x7a7/0xc00 [ice] [ 9793.712074] __ice_clean_ctrlq+0x98f/0xd20 [ice] [ 9793.712534] ? ice_bridge_setlink+0x410/0x410 [ice] [ 9793.712979] ? __request_module+0x320/0x520 [ 9793.713014] ? ice_process_vflr_event+0x27/0x130 [ice] [ 9793.713489] ice_service_task+0x11cf/0x1950 [ice] [ 9793.713948] ? io_schedule_timeout+0xb0/0xb0 [ 9793.713972] process_one_work+0x3d0/0x6a0 [ 9793.714003] worker_thread+0x8a/0x610 [ 9793.714031] ? process_one_work+0x6a0/0x6a0 [ 9793.714049] kthread+0x164/0x1a0 [ 9793.714071] ? 
kthread_complete_and_exit+0x20/0x20 [ 9793.714100] ret_from_fork+0x1f/0x30 [ 9793.714137] [ 9793.714151] The buggy address belongs to the variable: [ 9793.714158] ice_aq_to_link_speed+0x3c/0xffffffffffff3520 [ice] [ 9793.714632] Memory state around the buggy address: [ 9793.714642] ffffffffc1271a00: f9 f9 f9 f9 00 00 05 f9 f9 f9 f9 f9 00 00 02 f9 [ 9793.714656] ffffffffc1271a80: f9 f9 f9 f9 00 00 04 f9 f9 f9 f9 f9 00 00 00 00 [ 9793.714670] >ffffffffc1271b00: 00 00 00 04 f9 f9 f9 f9 04 f9 f9 f9 f9 f9 f9 f9 [ 9793.714680] ^ [ 9793.714690] ffffffffc1271b80: 00 00 00 00 00 04 f9 f9 f9 f9 f9 f9 00 00 00 00 [ 9793.714704] ffffffffc1271c00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 The ICE_AQ_LINK_SPEED_UNKNOWN define is BIT(15). The value is bigger than both legacy and normal link speed tables. Add one element (0 - unknown) to both tables. There is no need to explicitly set table size, leave it empty. Fixes: 1d0e28a9be1f ("ice: Remove and replace ice speed defines with ethtool.h versions") Signed-off-by: Michal Swiatkowski Reviewed-by: Alexander Lobakin Tested-by: Gurucharan G (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky --- drivers/net/ethernet/intel/ice/ice_common.c | 9 ++++----- drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 21 ++++++++------------- 2 files changed, 12 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d02b55b6aa9c..3e08847505ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) * returned by the firmware is a 16 bit * value, but is indexed * by [fls(speed) - 1] */ -static const u32 ice_aq_to_link_speed[15] = { +static const u32 ice_aq_to_link_speed[] = { SPEED_10, /* BIT(0) */ SPEED_100, SPEED_1000, @@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = { SPEED_40000, SPEED_50000, SPEED_100000, /* BIT(10) */ - 0, - 0, - 0, - 0 /* BIT(14) */ }; /** @@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = { */ u32 ice_get_link_speed(u16 index) { + if (index >= ARRAY_SIZE(ice_aq_to_link_speed)) + return 0; + return ice_aq_to_link_speed[index]; } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index d4a4001b6e5d..f56fa94ff3d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); } -static const u32 ice_legacy_aq_to_vc_speed[15] = { +static const u32 ice_legacy_aq_to_vc_speed[] = { VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */ VIRTCHNL_LINK_SPEED_100MB, VIRTCHNL_LINK_SPEED_1GB, @@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = { VIRTCHNL_LINK_SPEED_40GB, VIRTCHNL_LINK_SPEED_40GB, VIRTCHNL_LINK_SPEED_40GB, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */ }; /** @@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = { */ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) { - u32 speed; + /* convert a BIT() value into an array index */ + u32 index = fls(link_speed) - 1; - if (adv_link_support) { - /* convert a BIT() value into an array index */ - speed = 
ice_get_link_speed(fls(link_speed) - 1); - } else { + if (adv_link_support) + return ice_get_link_speed(index); + else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed)) /* Virtchnl speeds are not defined for every speed supported in * the hardware. To maintain compatibility with older AVF * drivers, while reporting the speed the new speed values are * resolved to the closest known virtchnl speeds */ - speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1]; - } + return ice_legacy_aq_to_vc_speed[index]; - return speed; + return VIRTCHNL_LINK_SPEED_UNKNOWN; } /* The mailbox overflow detection algorithm helps to check if there -- cgit v1.2.3 From c793f8ea15e312789b5b6b4a5e7b0b92315be5cb Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Mon, 16 Jan 2023 13:34:58 +0100 Subject: ice: Fix disabling Rx VLAN filtering with port VLAN enabled If the user turns on the vf-true-promiscuous-support flag, then Rx VLAN filtering will be disabled if the VF requests to enable promiscuous mode. When the VF is in a port VLAN, this is the incorrect behavior because it will allow the VF to receive traffic outside of its port VLAN domain. Fortunately this only resulted in the VF(s) receiving broadcast traffic outside of the VLAN domain because all of the VLAN promiscuous rules are based on the port VLAN ID. Fix this by setting the .disable_rx_filtering VLAN op to a no-op when a port VLAN is enabled on the VF. Also, make sure to make this fix for both Single VLAN Mode and Double VLAN Mode enabled devices. Fixes: c31af68a1b94 ("ice: Add outer_vlan_ops and VSI specific VLAN ops implementations") Signed-off-by: Brett Creeley Signed-off-by: Karen Ostrowska Tested-by: Marek Szlosek Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 5ecc0ee9a78e..b1ffb81893d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) /* outer VLAN ops regardless of port VLAN config */ vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; if (ice_vf_is_port_vlan_ena(vf)) { /* setup outer VLAN ops */ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; @@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; } else { + vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else @@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; } else { + 
vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else -- cgit v1.2.3 From 3f4870df1b15d62665cb86ca116c8c9cf0e830b0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Oct 2022 18:02:24 +0300 Subject: ice: Fix off by one in ice_tc_forward_to_queue() The > comparison should be >= to prevent reading one element beyond the end of the array. The "vsi->num_rxq" is not strictly speaking the number of elements in the vsi->rxq_map[] array. The array has "vsi->alloc_rxq" elements and "vsi->num_rxq" is less than or equal to the number of elements in the array. The array is allocated in ice_vsi_alloc_arrays(). It's still an off by one but it might not access outside the end of the array. Fixes: 143b86f346c7 ("ice: Enable RX queue selection using skbedit action") Signed-off-by: Dan Carpenter Acked-by: Amritha Nambiar Tested-by: Bharathi Sreenivas Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky --- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index faba0f857cd9..95f392ab9670 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, struct ice_vsi *ch_vsi = NULL; u16 queue = act->rx_queue; - if (queue > vsi->num_rxq) { + if (queue >= vsi->num_rxq) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified queue is invalid"); return -EINVAL; -- cgit v1.2.3 From 4a606ce68426c88ff2563382b33cc34f3485fe57 Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Mon, 12 Dec 2022 15:11:26 -0800 Subject: ice: switch: fix potential memleak in ice_add_adv_recipe() When ice_add_special_words() fails, the 'rm' is not released, which will lead to a memory leak. Fix this up by going to 'err_unroll' label. Compile tested only. Fixes: 8b032a55c1bd ("ice: low level support for tunnels") Signed-off-by: Zhang Changzhong Tested-by: Sujai Buvaneswaran Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky --- drivers/net/ethernet/intel/ice/ice_switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 9b762f7972ce..61f844d22512 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw)); if (status) - goto err_free_lkup_exts; + goto err_unroll; /* Group match words into recipes using preferred recipe grouping * criteria. -- cgit v1.2.3 From f964f8399df29d3e3ced77177cf35131cd2491bf Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Feb 2023 21:24:08 +0200 Subject: net: mscc: ocelot: fix VCAP filters not matching on MAC with "protocol 802.1Q" Alternative short title: don't instruct the hardware to match on EtherType with "protocol 802.1Q" flower filters. It doesn't work for the reasons detailed below. 
With a command such as the following: tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \ protocol 802.1Q flower skip_sw vlan_id 200 src_mac $h1_mac \ action vlan modify id 300 \ action goto chain $(IS2 0 0) the created filter is set by ocelot_flower_parse_key() to be of type OCELOT_VCAP_KEY_ETYPE, and etype is set to {value=0x8100, mask=0xffff}. This gets propagated all the way to is1_entry_set() which commits it to hardware (the VCAP_IS1_HK_ETYPE field of the key). Compare this to the case where src_mac isn't specified - the key type is OCELOT_VCAP_KEY_ANY, and is1_entry_set() doesn't populate VCAP_IS1_HK_ETYPE. The problem is that for VLAN-tagged frames, the hardware interprets the ETYPE field as holding the encapsulated VLAN protocol. So the above filter will only match those packets which have an encapsulated protocol of 0x8100, rather than all packets with VLAN ID 200 and the given src_mac. The reason why this is allowed to occur is because, although we have a block of code in ocelot_flower_parse_key() which sets "match_protocol" to false when VLAN keys are present, that code executes too late. There is another block of code, which executes for Ethernet addresses, and has a "goto finished_key_parsing" and skips the VLAN header parsing. By skipping it, "match_protocol" remains with the value it was initialized with, i.e. "true", and "proto" is set to f->common.protocol, or 0x8100. The concept of ignoring some keys rather than erroring out when they are present but can't be offloaded is dubious in itself, but is present since the initial commit fe3490e6107e ("net: mscc: ocelot: Hardware ofload for tc flower filter"), and it's outside of the scope of this patch to change that. The problem was introduced when the driver started to interpret the flower filter's protocol, and populate the VCAP filter's ETYPE field based on it. To fix this, it is sufficient to move the code that parses the VLAN keys earlier than the "goto finished_key_parsing" instruction. This will ensure that if we have a flower filter with both VLAN and Ethernet address keys, it won't match on ETYPE 0x8100, because the VLAN key parsing sets "match_protocol = false". 
Fixes: 86b956de119c ("net: mscc: ocelot: support matching on EtherType") Signed-off-by: Vladimir Oltean Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230205192409.1796428-1-vladimir.oltean@nxp.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mscc/ocelot_flower.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 7c0897e779dc..ee052404eb55 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, flow_rule_match_control(rule, &match); } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + filter->vlan.pcp.mask[0] = match.mask->vlan_priority; + match_protocol = false; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; @@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, match_protocol = false; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_match_vlan match; - - flow_rule_match_vlan(rule, &match); - filter->key_type = OCELOT_VCAP_KEY_ANY; - filter->vlan.vid.value = match.key->vlan_id; - filter->vlan.vid.mask = match.mask->vlan_id; - filter->vlan.pcp.value[0] = match.key->vlan_priority; - filter->vlan.pcp.mask[0] = match.mask->vlan_priority; - match_protocol = false; - } - finished_key_parsing: if (match_protocol && proto != ETH_P_ALL) { if (filter->block_id == VCAP_ES0) { -- cgit v1.2.3 From 1e66220948df815d7b37e0ff8b4627ce10433738 Mon Sep 17 00:00:00 2001 From: Adham Faris Date: Sun, 8 Jan 2023 18:09:32 +0200 Subject: net/mlx5e: Update rx ring hw mtu upon each rx-fcs flag change rq->hw_mtu is used in function en_rx.c/mlx5e_skb_from_cqe_mpwrq_linear() to catch oversized packets. If FCS is concatenated to the end of the packet then the check should be updated accordingly. Rx rings initialization (mlx5e_init_rxq_rq()) invoked for every new set of channels, as part of mlx5e_safe_switch_params(), unknowingly if it runs with default configuration or not. Current rq->hw_mtu initialization assumes default configuration and ignores params->scatter_fcs_en flag state. Fix this, by accounting for params->scatter_fcs_en flag state during rq->hw_mtu initialization. In addition, updating rq->hw_mtu value during ingress traffic might lead to packets drop and oversize_pkts_sw_drop counter increase with no good reason. Hence we remove this optimization and switch the set of channels with a new one, to make sure we don't get false positives on the oversize_pkts_sw_drop counter. 
Fixes: 102722fc6832 ("net/mlx5e: Add support for RXFCS feature flag") Signed-off-by: Adham Faris Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 86 ++++------------------- 1 file changed, 15 insertions(+), 71 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index abcc614b6191..f8b5e5993f35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param rq->ix = c->ix; rq->channel = c; rq->mdev = mdev; - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->hw_mtu = + MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en; rq->xdpsq = &c->rq_xdpsq; rq->stats = &c->priv->channel_stats[c->ix]->rq; rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); @@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) return mlx5e_rq_to_ready(rq, curr_state); } -static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) -{ - struct mlx5_core_dev *mdev = rq->mdev; - - void *in; - void *rqc; - int inlen; - int err; - - inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); - - MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); - MLX5_SET64(modify_rq_in, in, modify_bitmask, - MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS); - MLX5_SET(rqc, rqc, scatter_fcs, enable); - MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); - - err = mlx5_core_modify_rq(mdev, rq->rqn, in); - - kvfree(in); - - return err; -} - static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) { struct mlx5_core_dev *mdev = rq->mdev; @@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) mlx5e_destroy_tises(priv); } -static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) -{ - int err = 0; - int i; - - for (i = 0; i < chs->num; i++) { - err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); - if (err) - return err; - } - - return 0; -} - static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) { int err; @@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) return mlx5_set_ports_check(mdev, in, sizeof(in)); } +static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx) +{ + struct mlx5_core_dev *mdev = priv->mdev; + bool enable = *(bool *)ctx; + + return mlx5e_set_rx_port_ts(mdev, enable); +} + static int set_feature_rx_fcs(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels *chs = &priv->channels; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_params new_params; int err; mutex_lock(&priv->state_lock); - if (enable) { - err = mlx5e_set_rx_port_ts(mdev, false); - if (err) - goto out; - - chs->params.scatter_fcs_en = true; - err = mlx5e_modify_channels_scatter_fcs(chs, true); - if (err) { - chs->params.scatter_fcs_en = false; - mlx5e_set_rx_port_ts(mdev, true); - } - } else { - chs->params.scatter_fcs_en = false; - err = mlx5e_modify_channels_scatter_fcs(chs, false); - if (err) { - chs->params.scatter_fcs_en = true; - goto out; - } - err = mlx5e_set_rx_port_ts(mdev, true); - if (err) { - mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err); 
- err = 0; - } - } - -out: + new_params = chs->params; + new_params.scatter_fcs_en = enable; + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap, + &new_params.scatter_fcs_en, true); mutex_unlock(&priv->state_lock); return err; } -- cgit v1.2.3 From 288d85e07fbca5ee35882a4884dd701e43637be1 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Tue, 17 Jan 2023 17:34:20 +0200 Subject: net/mlx5: DR, Fix potential race in dr_rule_create_rule_nic Selecting builder should be protected by the lock to prevent the case where a new rule sets a builder in the nic_matcher while the previous rule is still using the nic_matcher. Fixing this issue and cleaning the error flow. Fixes: b9b81e1e9382 ("net/mlx5: DR, For short chains of STEs, avoid allocating ste_arr dynamically") Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/steering/dr_rule.c | 25 +++++++++++++--------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index b851141e03de..042ca0349124 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, rule->flow_source)) return 0; + mlx5dr_domain_nic_lock(nic_dmn); + ret = mlx5dr_matcher_select_builders(matcher, nic_matcher, dr_rule_get_ipv(¶m->outer), dr_rule_get_ipv(¶m->inner)); if (ret) - return ret; + goto err_unlock; hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED; if (likely(hw_ste_arr_is_opt)) { @@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) * DR_STE_SIZE, GFP_KERNEL); - if (!hw_ste_arr) - return -ENOMEM; + if (!hw_ste_arr) { + ret = -ENOMEM; + goto err_unlock; + } } - mlx5dr_domain_nic_lock(nic_dmn); - ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher); if (ret) goto free_hw_ste; @@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, mlx5dr_domain_nic_unlock(nic_dmn); - goto out; + if (unlikely(!hw_ste_arr_is_opt)) + kfree(hw_ste_arr); + + return 0; free_rule: dr_rule_clean_rule_members(rule, nic_rule); @@ -1238,12 +1243,12 @@ remove_from_nic_tbl: mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher); free_hw_ste: - mlx5dr_domain_nic_unlock(nic_dmn); - -out: - if (unlikely(!hw_ste_arr_is_opt)) + if (!hw_ste_arr_is_opt) kfree(hw_ste_arr); +err_unlock: + mlx5dr_domain_nic_unlock(nic_dmn); + return ret; } -- cgit v1.2.3 From da0c52426cd23f8728eff72c2b2d2a3eb6b451f5 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 26 Jan 2023 14:47:12 +0100 Subject: net/mlx5: Bridge, fix ageing of peer FDB entries SWITCHDEV_FDB_ADD_TO_BRIDGE event handler that updates FDB entry 'lastuse' field is only executed for eswitch that owns the entry. However, if peer entry processed packets at least once it will have hardware counter 'used' value greater than entry 'lastuse' from that point on, which will cause FDB entry not being aged out. Process the event on all eswitch instances. 
Fixes: ff9b7521468b ("net/mlx5: Bridge, support LAG") Signed-off-by: Vlad Buslov Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4 ---- drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 8099a21e674c..ce85b48d327d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, switch (event) { case SWITCHDEV_FDB_ADD_TO_BRIDGE: - /* only handle the event on native eswtich of representor */ - if (!mlx5_esw_bridge_is_local(dev, rep, esw)) - break; - fdb_info = container_of(info, struct switchdev_notifier_fdb_info, info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index b176648d1343..3cdcb0e0b20f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 struct mlx5_esw_bridge *bridge; port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); - if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) + if (!port) return; bridge = port->bridge; -- cgit v1.2.3 From 8974aa9638df557f4642acef707af15648a03555 Mon Sep 17 00:00:00 2001 From: Amir Tzin Date: Sun, 8 Jan 2023 15:54:46 +0200 Subject: net/mlx5e: Fix crash unsetting rx-vlan-filter in switchdev mode Moving to switchdev mode with rx-vlan-filter on and then setting it off causes the kernel to crash since fs->vlan is freed during nic profile cleanup flow. RX VLAN filtering is not supported in switchdev mode so unset it when changing to switchdev and restore its value when switching back to legacy. trace: [] RIP: 0010:mlx5e_disable_cvlan_filter+0x43/0x70 [] set_feature_cvlan_filter+0x37/0x40 [mlx5_core] [] mlx5e_handle_feature+0x3a/0x60 [mlx5_core] [] mlx5e_set_features+0x6d/0x160 [mlx5_core] [] __netdev_update_features+0x288/0xa70 [] ethnl_set_features+0x309/0x380 [] ? __nla_parse+0x21/0x30 [] genl_family_rcv_msg_doit.isra.17+0x110/0x150 [] genl_rcv_msg+0x112/0x260 [] ? features_reply_size+0xe0/0xe0 [] ? 
genl_family_rcv_msg_doit.isra.17+0x150/0x150 [] netlink_rcv_skb+0x4e/0x100 [] genl_rcv+0x24/0x40 [] netlink_unicast+0x1ab/0x290 [] netlink_sendmsg+0x257/0x4f0 [] sock_sendmsg+0x5c/0x70 Fixes: cb67b832921c ("net/mlx5e: Introduce SRIOV VF representors") Signed-off-by: Amir Tzin Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1892ccb889b3..7cd36f4ac3ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc) void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc) { - if (fs->vlan->cvlan_filter_disabled) + if (!fs->vlan || fs->vlan->cvlan_filter_disabled) return; fs->vlan->cvlan_filter_disabled = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f8b5e5993f35..6c24f33a5ea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4018,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev if (netdev->features & NETIF_F_GRO_HW) netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n"); + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n"); + return features; } -- cgit v1.2.3 From 8aa5f171d51c1cb69e5e3106df4dd1a446102823 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 23 Jan 2023 16:06:32 +0200 Subject: net/mlx5e: IPoIB, Show unknown speed instead of error ethtool is returning an error for unknown speeds for the IPoIB interface: $ ethtool ib0 netlink error: failed to retrieve link settings netlink error: Invalid argument netlink error: failed to retrieve link settings netlink error: Invalid argument Settings for ib0: Link detected: no After this change, ethtool will return success and show "unknown speed": $ ethtool ib0 Settings for ib0: Supported ports: [ ] Supported link modes: Not reported Supported pause frame use: No Supports auto-negotiation: No Supported FEC modes: Not reported Advertised link modes: Not reported Advertised pause frame use: No Advertised auto-negotiation: No Advertised FEC modes: Not reported Speed: Unknown! 
Duplex: Full Auto-negotiation: off Port: Other PHYAD: 0 Transceiver: internal Link detected: no Fixes: eb234ee9d541 ("net/mlx5e: IPoIB, Add support for get_link_ksettings in ethtool") Signed-off-by: Dragos Tatulea Reviewed-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index eff92dc0927c..e09518f887a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) } } -static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) +static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) { int rate, width; rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper); if (rate < 0) - return -EINVAL; + return SPEED_UNKNOWN; width = mlx5_ptys_width_enum_to_int(ib_link_width_oper); if (width < 0) - return -EINVAL; + return SPEED_UNKNOWN; return rate * width; } @@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper); - if (speed < 0) - return -EINVAL; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL; - link_ksettings->base.duplex = DUPLEX_FULL; link_ksettings->base.port = PORT_OTHER; link_ksettings->base.autoneg = AUTONEG_DISABLE; - link_ksettings->base.speed = speed; - return 0; } -- cgit v1.2.3 From c3bdbaea654d8df39112de33037106134a520dc7 Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Sun, 22 Jan 2023 21:09:40 +0200 Subject: net/mlx5: Store page counters in a single array Currently, an independent page counter is used for tracking memory usage for each function type such as VF, PF and host PF (DPU). For better code-readibilty, use a single array that stores the number of allocated memory pages for each function type. 
Signed-off-by: Maher Sanalla Reviewed-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 4 +-- drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 2 +- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 37 ++++++++++++---------- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2 +- include/linux/mlx5/driver.h | 12 +++++-- 5 files changed, 34 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 3e232a65a0c3..c3e7c24a0971 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -245,8 +245,8 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) pages = dev->priv.dbg.pages_debugfs; debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); - debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages); - debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages); + debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]); + debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]); debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed); debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped); debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c index 464eb3a18450..cdc87ecae5d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev) mlx5_host_pf_cleanup(dev); - err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages); + err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]); if (err) mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 60596357bfc7..9f99292ab5ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function) return (u32)func_id | (ec_function << 16); } +static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function) +{ + if (!func_id) + return mlx5_core_is_ecpf(dev) && !ec_function ? 
MLX5_HOST_PF : MLX5_PF; + + return MLX5_VF; +} + static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) { struct rb_root *root; @@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); int notify_fail = event; + u16 func_type; u64 addr; int err; u32 *in; @@ -383,11 +392,9 @@ retry: goto out_dropped; } + func_type = func_id_to_type(dev, func_id, ec_function); + dev->priv.page_counters[func_type] += npages; dev->priv.fw_pages += npages; - if (func_id) - dev->priv.vfs_pages += npages; - else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.host_pf_pages += npages; mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", npages, ec_function, func_id, err); @@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id, struct rb_root *root; struct rb_node *p; int npages = 0; + u16 func_type; root = xa_load(&dev->priv.page_root_xa, function); if (WARN_ON_ONCE(!root)) @@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id, free_fwp(dev, fwp, fwp->free_count); } + func_type = func_id_to_type(dev, func_id, ec_function); + dev->priv.page_counters[func_type] -= npages; dev->priv.fw_pages -= npages; - if (func_id) - dev->priv.vfs_pages -= npages; - else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.host_pf_pages -= npages; mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n", npages, ec_function, func_id); @@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {}; int num_claimed; + u16 func_type; u32 *out; int err; int i; @@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, if (nclaimed) *nclaimed = num_claimed; + func_type = func_id_to_type(dev, func_id, ec_function); + dev->priv.page_counters[func_type] -= num_claimed; dev->priv.fw_pages -= num_claimed; - if (func_id) - dev->priv.vfs_pages -= num_claimed; - else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.host_pf_pages -= num_claimed; out_free: kvfree(out); @@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) WARN(dev->priv.fw_pages, "FW pages counter is %d after reclaiming all pages\n", dev->priv.fw_pages); - WARN(dev->priv.vfs_pages, + WARN(dev->priv.page_counters[MLX5_VF], "VFs FW pages counter is %d after reclaiming all pages\n", - dev->priv.vfs_pages); - WARN(dev->priv.host_pf_pages, + dev->priv.page_counters[MLX5_VF]); + WARN(dev->priv.page_counters[MLX5_HOST_PF], "External host PF FW pages counter is %d after reclaiming all pages\n", - dev->priv.host_pf_pages); + dev->priv.page_counters[MLX5_HOST_PF]); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index c0e6c487c63c..3008e9ce2bbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf) mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf); - if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages)) + if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF])) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h 
index 76ef2e4fde38..82a9bd4274b8 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -573,6 +573,13 @@ struct mlx5_debugfs_entries { struct dentry *lag_debugfs; }; +enum mlx5_func_type { + MLX5_PF, + MLX5_VF, + MLX5_HOST_PF, + MLX5_FUNC_TYPE_NUM, +}; + struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ @@ -583,11 +590,10 @@ struct mlx5_priv { struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct xarray page_root_xa; - u32 fw_pages; atomic_t reg_pages; struct list_head free_list; - u32 vfs_pages; - u32 host_pf_pages; + u32 fw_pages; + u32 page_counters[MLX5_FUNC_TYPE_NUM]; u32 fw_pages_alloc_failed; u32 give_pages_dropped; u32 reclaim_pages_discard; -- cgit v1.2.3 From 9965bbebae59b3563a4d95e4aed121e8965dfdc2 Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Sun, 22 Jan 2023 23:24:56 +0200 Subject: net/mlx5: Expose SF firmware pages counter Currently, each core device has VF pages counter which stores number of fw pages used by its VFs and SFs. The current design led to a hang when performing firmware reset on DPU, where the DPU PFs stalled in sriov unload flow due to waiting on release of SFs pages instead of waiting on only VFs pages. Thus, Add a separate counter for SF firmware pages, which will prevent the stall scenario described above. Fixes: 1958fc2f0712 ("net/mlx5: SF, Add auxiliary device driver") Signed-off-by: Maher Sanalla Reviewed-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2 +- include/linux/mlx5/driver.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index c3e7c24a0971..bb95b40d25eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -246,6 +246,7 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]); + debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]); debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]); debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed); debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 9f99292ab5ce..0eb50be175cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -79,7 +79,7 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct if (!func_id) return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF; - return MLX5_VF; + return func_id <= mlx5_core_max_vfs(dev) ? 
MLX5_VF : MLX5_SF; } static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 82a9bd4274b8..333c1fec72f8 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -576,6 +576,7 @@ struct mlx5_debugfs_entries { enum mlx5_func_type { MLX5_PF, MLX5_VF, + MLX5_SF, MLX5_HOST_PF, MLX5_FUNC_TYPE_NUM, }; -- cgit v1.2.3 From db561fed6b8fa3878e74d5df6512a4a38152b63e Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Mon, 9 Jan 2023 15:27:40 +0200 Subject: net/mlx5: fw_tracer, Clear load bit when freeing string DBs buffers Whenever the driver is reading the string DBs into buffers, the driver is setting the load bit, but the driver never clears this bit. As a result, in case load bit is on and the driver query the device for new string DBs, the driver won't read again the string DBs. Fix it by clearing the load bit when query the device for new string DBs. Fixes: 2d69356752ff ("net/mlx5: Add support for fw live patch event") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 21831386b26e..d82e98a0cdfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) MLX5_GET(mtrc_cap, out, num_string_trace); tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + tracer->str_db.loaded = false; for (i = 0; i < tracer->str_db.num_string_db; i++) { mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); -- cgit v1.2.3 From 184e1e4474dbcfebc4dbd1fa823a329978f25506 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Wed, 25 Jan 2023 17:39:36 +0200 Subject: net/mlx5: fw_tracer, Zero consumer index when reloading the tracer When tracer is reloaded, the device will log the traces at the beginning of the log buffer. Also, driver is reading the log buffer in chunks in accordance to the consumer index. Hence, zero consumer index when reloading the tracer. 
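In essence (simplified from the hunk below; register setup and error handling elided), the read position is reset in the configuration path that runs whenever the tracer is (re)loaded:

    static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
    {
            ...
            /* the device logs from the start of the buffer after reload,
             * so restart reading from the start as well
             */
            tracer->buff.consumer_index = 0;
            return err;
    }
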
Fixes: 4383cfcc65e7 ("net/mlx5: Add devlink reload") Signed-off-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index d82e98a0cdfa..5b05b884b5fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -757,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) if (err) mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); + tracer->buff.consumer_index = 0; return err; } @@ -821,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); if (tracer->owner) { tracer->owner = false; - tracer->buff.consumer_index = 0; return; } -- cgit v1.2.3 From 8f0d1451ecf7b3bd5a06ffc866c753d0f3ab4683 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Wed, 14 Dec 2022 22:16:23 +0200 Subject: net/mlx5: Serialize module cleanup with reload and remove Currently, remove and reload flows can run in parallel to module cleanup. This design is error prone. For example: aux_drivers callbacks are called from both cleanup and remove flows with different lockings, which can cause a deadlock[1]. Hence, serialize module cleanup with reload and remove. [1] cleanup remove ------- ------ auxiliary_driver_unregister(); devl_lock() auxiliary_device_delete(mlx5e_aux) device_lock(mlx5e_aux) devl_lock() device_lock(mlx5e_aux) Fixes: 912cebf420c2 ("net/mlx5e: Connect ethernet part to auxiliary bus") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3d5f2a4b1fed..4e1b5757528a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -2110,7 +2110,7 @@ static int __init mlx5_init(void) mlx5_core_verify_params(); mlx5_register_debugfs(); - err = pci_register_driver(&mlx5_core_driver); + err = mlx5e_init(); if (err) goto err_debug; @@ -2118,16 +2118,16 @@ static int __init mlx5_init(void) if (err) goto err_sf; - err = mlx5e_init(); + err = pci_register_driver(&mlx5_core_driver); if (err) - goto err_en; + goto err_pci; return 0; -err_en: +err_pci: mlx5_sf_driver_unregister(); err_sf: - pci_unregister_driver(&mlx5_core_driver); + mlx5e_cleanup(); err_debug: mlx5_unregister_debugfs(); return err; @@ -2135,9 +2135,9 @@ err_debug: static void __exit mlx5_cleanup(void) { - mlx5e_cleanup(); - mlx5_sf_driver_unregister(); pci_unregister_driver(&mlx5_core_driver); + mlx5_sf_driver_unregister(); + mlx5e_cleanup(); mlx5_unregister_debugfs(); } -- cgit v1.2.3 From 18a048370b06a3a521219e9e5b10bdc2178ef19c Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 6 Feb 2023 13:28:49 -0800 Subject: net: mana: Fix accessing freed irq affinity_hint After calling irq_set_affinity_and_hint(), the cpumask pointer is saved in desc->affinity_hint, and will be used later when reading /proc/irq//affinity_hint. So the cpumask variable needs to be persistent. 
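cpumask_of(cpu) provides such a persistent mask; a minimal sketch of the pattern adopted in the patch below (setup and teardown only, other details elided):

    /* setup: the hinted mask must outlive the IRQ */
    cpu = cpumask_local_spread(i, gc->numa_node);
    irq_set_affinity_and_hint(irq, cpumask_of(cpu));

    /* teardown: clear the hint before free_irq() */
    irq_update_affinity_hint(irq, NULL);
    free_irq(irq, gic);
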
Otherwise, we are accessing freed memory when reading the affinity_hint file. Also, need to clear affinity_hint before free_irq(), otherwise there is a one-time warning and stack trace during module unloading: [ 243.948687] WARNING: CPU: 10 PID: 1589 at kernel/irq/manage.c:1913 free_irq+0x318/0x360 ... [ 243.948753] Call Trace: [ 243.948754] [ 243.948760] mana_gd_remove_irqs+0x78/0xc0 [mana] [ 243.948767] mana_gd_remove+0x3e/0x80 [mana] [ 243.948773] pci_device_remove+0x3d/0xb0 [ 243.948778] device_remove+0x46/0x70 [ 243.948782] device_release_driver_internal+0x1fe/0x280 [ 243.948785] driver_detach+0x4e/0xa0 [ 243.948787] bus_remove_driver+0x70/0xf0 [ 243.948789] driver_unregister+0x35/0x60 [ 243.948792] pci_unregister_driver+0x44/0x90 [ 243.948794] mana_driver_exit+0x14/0x3fe [mana] [ 243.948800] __do_sys_delete_module.constprop.0+0x185/0x2f0 To fix the bug, use the persistent mask, cpumask_of(cpu#), and set affinity_hint to NULL before freeing the IRQ, as required by free_irq(). Cc: stable@vger.kernel.org Fixes: 71fa6887eeca ("net: mana: Assign interrupts to CPUs based on NUMA nodes") Signed-off-by: Haiyang Zhang Reviewed-by: Michael Kelley Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/1675718929-19565-1-git-send-email-haiyangz@microsoft.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microsoft/mana/gdma_main.c | 37 ++++++++----------------- 1 file changed, 11 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index b144f2237748..f9b8f372ec8a 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) unsigned int max_queues_per_port = num_online_cpus(); struct gdma_context *gc = pci_get_drvdata(pdev); struct gdma_irq_context *gic; - unsigned int max_irqs; - u16 *cpus; - cpumask_var_t req_mask; + unsigned int max_irqs, cpu; int nvec, irq; int err, i = 0, j; @@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) goto free_irq_vector; } - if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) { - err = -ENOMEM; - goto free_irq; - } - - cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL); - if (!cpus) { - err = -ENOMEM; - goto free_mask; - } - for (i = 0; i < nvec; i++) - cpus[i] = cpumask_local_spread(i, gc->numa_node); - for (i = 0; i < nvec; i++) { - cpumask_set_cpu(cpus[i], req_mask); gic = &gc->irq_contexts[i]; gic->handler = NULL; gic->arg = NULL; @@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) irq = pci_irq_vector(pdev, i); if (irq < 0) { err = irq; - goto free_mask; + goto free_irq; } err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); if (err) - goto free_mask; - irq_set_affinity_and_hint(irq, req_mask); - cpumask_clear(req_mask); + goto free_irq; + + cpu = cpumask_local_spread(i, gc->numa_node); + irq_set_affinity_and_hint(irq, cpumask_of(cpu)); } - free_cpumask_var(req_mask); - kfree(cpus); err = mana_gd_alloc_res_map(nvec, &gc->msix_resource); if (err) @@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) return 0; -free_mask: - free_cpumask_var(req_mask); - kfree(cpus); free_irq: for (j = i - 1; j >= 0; j--) { irq = pci_irq_vector(pdev, j); gic = &gc->irq_contexts[j]; + + irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); } @@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev) continue; gic = 
&gc->irq_contexts[i]; + + /* Need to clear the hint before free_irq */ + irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); } -- cgit v1.2.3 From 9b275176270efd18f2f4e328b32be1bad34c4c0d Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 6 Feb 2023 15:58:18 -0800 Subject: igc: Add ndo_tx_timeout support On some platforms, 100/1000/2500 speeds seem to have sometimes problems reporting false positive tx unit hang during stressful UDP traffic. Likely other Intel drivers introduce responses to a tx hang. Update the 'tx hang' comparator with the comparison of the head and tail of ring pointers and restore the tx_timeout_factor to the previous value (one). This can be test by using netperf or iperf3 applications. Example: iperf3 -s -p 5001 iperf3 -c 192.168.0.2 --udp -p 5001 --time 600 -b 0 netserver -p 16604 netperf -H 192.168.0.2 -l 600 -p 16604 -t UDP_STREAM -- -m 64000 Fixes: b27b8dc77b5e ("igc: Increase timeout value for Speed 100/1000/2500") Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20230206235818.662384-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/igc/igc_main.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 44b1740dc098..1dd2a7fee8d4 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) if (tx_buffer->next_to_watch && time_after(jiffies, tx_buffer->time_stamp + (adapter->tx_timeout_factor * HZ)) && - !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != + readl(tx_ring->tail))) { /* detected Tx unit hang */ netdev_err(tx_ring->netdev, "Detected Tx Unit Hang\n" @@ -5068,6 +5070,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + /** * igc_get_stats64 - Get System Network Statistics * @netdev: network interface device structure @@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work) case SPEED_100: case SPEED_1000: case SPEED_2500: - adapter->tx_timeout_factor = 7; + adapter->tx_timeout_factor = 1; break; } @@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_rx_mode = igc_set_rx_mode, .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, .ndo_get_stats64 = igc_get_stats64, .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, -- cgit v1.2.3 From 21386e692613702502f7c982d81e0dfa86d25cfd Mon Sep 17 00:00:00 2001 From: Arınç ÜNAL Date: Sun, 5 Feb 2023 20:53:31 +0300 Subject: net: ethernet: mtk_eth_soc: enable special tag when any MAC uses DSA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit The special tag is only enabled when the first MAC uses DSA. However, it must be enabled when any MAC uses DSA. Change the check accordingly. This fixes hardware DSA untagging not working on the second MAC of the MT7621 and MT7623 SoCs, and likely other SoCs too. Therefore, remove the check that disables hardware DSA untagging for the second MAC of the MT7621 and MT7623 SoCs. Fixes: a1f47752fd62 ("net: ethernet: mtk_eth_soc: disable hardware DSA untagging for second MAC") Co-developed-by: Richard van Schagen Signed-off-by: Richard van Schagen Signed-off-by: Arınç ÜNAL Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d56eda6397a4..d73e69ed3e95 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3111,7 +3111,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) val |= config; - if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) + if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i])) val |= MTK_GDMA_SPECIAL_TAG; mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); @@ -3177,8 +3177,7 @@ static int mtk_open(struct net_device *dev) struct mtk_eth *eth = mac->hw; int i, err; - if ((mtk_uses_dsa(dev) && !eth->prog) && - !(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) { + if (mtk_uses_dsa(dev) && !eth->prog) { for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { struct metadata_dst *md_dst = eth->dsa_meta[i]; @@ -3195,8 +3194,7 @@ static int mtk_open(struct net_device *dev) } } else { /* Hardware special tag parsing needs to be disabled if at least - * one MAC does not use DSA, or the second MAC of the MT7621 and - * MT7623 SoCs is being used. + * one MAC does not use DSA. */ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); val &= ~MTK_CDMP_STAG_EN; -- cgit v1.2.3 From c966153d120222cd4e85e1e1601584d7d4d91dcb Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 6 Feb 2023 22:47:03 +0200 Subject: net: ethernet: mtk_eth_soc: fix wrong parameters order in __xdp_rxq_info_reg() Parameters 'queue_index' and 'napi_id' are passed in a swapped order. Fix it here. Fixes: 23233e577ef9 ("net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers") Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d73e69ed3e95..8f61e3b052c6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, if (IS_ERR(pp)) return pp; - err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, eth->rx_napi.napi_id, - id, PAGE_SIZE); + err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id, + eth->rx_napi.napi_id, PAGE_SIZE); if (err < 0) goto err_free_pp; -- cgit v1.2.3 From 821de68c1f9c0236b0b9c10834cda900ae9b443c Mon Sep 17 00:00:00 2001 From: Yu Xiao Date: Tue, 7 Feb 2023 11:16:50 +0100 Subject: nfp: ethtool: fix the bug of setting unsupported port speed Unsupported port speed can be set and cause error. Now fixing it and return an error if setting unsupported speed. 
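The added check amounts to validating the requested speed against a per-port bitmap of supported speeds (simplified sketch; identifiers are those from the patch, error unwinding elided):

    bool is_supported = false;

    for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
            if (cmd->base.speed == nfp_eth_speed_map[i] &&
                test_bit(i, port->speed_bitmap)) {
                    is_supported = true;
                    break;
            }
    }

    if (!is_supported)
            return -EINVAL;   /* speed not advertised by the port media */
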
This fix depends on the following, which was included in v6.2-rc1: commit a61474c41e8c ("nfp: ethtool: support reporting link modes"). Fixes: 7c698737270f ("nfp: add support for .set_link_ksettings()") Signed-off-by: Yu Xiao Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 194 +++++++++++++++++---- drivers/net/ethernet/netronome/nfp/nfp_port.h | 12 ++ 2 files changed, 170 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index a4a89ef3f18b..cc97b3d00414 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port, } } -static const u16 nfp_eth_media_table[] = { - [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, - [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, - [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, - [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, - [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, +static const struct nfp_eth_media_link_mode { + u16 ethtool_link_mode; + u16 speed; +} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = { + [NFP_MEDIA_1000BASE_CX] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = NFP_SPEED_1G, + }, + [NFP_MEDIA_1000BASE_KX] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = NFP_SPEED_1G, + }, + [NFP_MEDIA_10GBASE_KX4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_10GBASE_KR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_10GBASE_CX4] = { + .ethtool_link_mode = 
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_10GBASE_CR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_10GBASE_SR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_10GBASE_ER] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + .speed = NFP_SPEED_10G, + }, + [NFP_MEDIA_25GBASE_KR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = NFP_SPEED_25G, + }, + [NFP_MEDIA_25GBASE_KR_S] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = NFP_SPEED_25G, + }, + [NFP_MEDIA_25GBASE_CR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = NFP_SPEED_25G, + }, + [NFP_MEDIA_25GBASE_CR_S] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = NFP_SPEED_25G, + }, + [NFP_MEDIA_25GBASE_SR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + .speed = NFP_SPEED_25G, + }, + [NFP_MEDIA_40GBASE_CR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = NFP_SPEED_40G, + }, + [NFP_MEDIA_40GBASE_KR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = NFP_SPEED_40G, + }, + [NFP_MEDIA_40GBASE_SR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + .speed = NFP_SPEED_40G, + }, + [NFP_MEDIA_40GBASE_LR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + .speed = NFP_SPEED_40G, + }, + [NFP_MEDIA_50GBASE_KR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_50GBASE_SR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_50GBASE_CR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_50GBASE_LR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_50GBASE_ER] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_50GBASE_FR] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + .speed = NFP_SPEED_50G, + }, + [NFP_MEDIA_100GBASE_KR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = NFP_SPEED_100G, + }, + [NFP_MEDIA_100GBASE_SR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + .speed = NFP_SPEED_100G, + }, + [NFP_MEDIA_100GBASE_CR4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = NFP_SPEED_100G, + }, + [NFP_MEDIA_100GBASE_KP4] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = NFP_SPEED_100G, + }, + [NFP_MEDIA_100GBASE_CR10] = { + .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = NFP_SPEED_100G, + }, +}; + +static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = { + [NFP_SPEED_1G] = SPEED_1000, + [NFP_SPEED_10G] = SPEED_10000, + [NFP_SPEED_25G] = SPEED_25000, + [NFP_SPEED_40G] = SPEED_40000, + [NFP_SPEED_50G] = SPEED_50000, + [NFP_SPEED_100G] = SPEED_100000, }; static void nfp_add_media_link_mode(struct nfp_port *port, @@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port, }; struct nfp_cpp *cpp = port->app->cpp; - if (nfp_eth_read_media(cpp, ðm)) + if (nfp_eth_read_media(cpp, ðm)) { + bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER); return; + 
} + + bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER); for (u32 i = 0; i < 2; i++) { supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]); @@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port, for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) { if (i < 64) { - if (supported_modes[0] & BIT_ULL(i)) - __set_bit(nfp_eth_media_table[i], + if (supported_modes[0] & BIT_ULL(i)) { + __set_bit(nfp_eth_media_table[i].ethtool_link_mode, cmd->link_modes.supported); + __set_bit(nfp_eth_media_table[i].speed, + port->speed_bitmap); + } if (advertised_modes[0] & BIT_ULL(i)) - __set_bit(nfp_eth_media_table[i], + __set_bit(nfp_eth_media_table[i].ethtool_link_mode, cmd->link_modes.advertising); } else { - if (supported_modes[1] & BIT_ULL(i - 64)) - __set_bit(nfp_eth_media_table[i], + if (supported_modes[1] & BIT_ULL(i - 64)) { + __set_bit(nfp_eth_media_table[i].ethtool_link_mode, cmd->link_modes.supported); + __set_bit(nfp_eth_media_table[i].speed, + port->speed_bitmap); + } if (advertised_modes[1] & BIT_ULL(i - 64)) - __set_bit(nfp_eth_media_table[i], + __set_bit(nfp_eth_media_table[i].ethtool_link_mode, cmd->link_modes.advertising); } } @@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev, if (cmd->base.speed != SPEED_UNKNOWN) { u32 speed = cmd->base.speed / eth_port->lanes; + bool is_supported = false; + + for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) { + if (cmd->base.speed == nfp_eth_speed_map[i] && + test_bit(i, port->speed_bitmap)) { + is_supported = true; + break; + } + } + + if (!is_supported) { + netdev_err(netdev, "Speed %u is not supported.\n", + cmd->base.speed); + err = -EINVAL; + goto err_bad_set; + } if (req_aneg) { netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index f8cd157ca1d7..9c04f9f0e2c9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -38,6 +38,16 @@ enum nfp_port_flags { NFP_PORT_CHANGED = 0, }; +enum { + NFP_SPEED_1G, + NFP_SPEED_10G, + NFP_SPEED_25G, + NFP_SPEED_40G, + NFP_SPEED_50G, + NFP_SPEED_100G, + NFP_SUP_SPEED_NUMBER +}; + /** * struct nfp_port - structure representing NFP port * @netdev: backpointer to associated netdev @@ -52,6 +62,7 @@ enum nfp_port_flags { * @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available + * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id * @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC @@ -78,6 +89,7 @@ struct nfp_port { bool eth_forced; struct nfp_eth_table_port *eth_port; u8 __iomem *eth_stats; + DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER); }; /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */ struct { -- cgit v1.2.3 From 1a3245fe0cf84e630598da4ab110a5f8a2d6730d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 7 Feb 2023 12:30:27 +0200 Subject: net: ethernet: mtk_eth_soc: fix DSA TX tag hwaccel for switch port 0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Arınç reports that on his MT7621AT Unielec U7621-06 board and MT7623NI Bananapi BPI-R2, packets received by the CPU over mt7530 switch port 0 (of 
which this driver acts as the DSA master) are not processed correctly by software. More precisely, they arrive without a DSA tag (in packet or in the hwaccel area - skb_metadata_dst()), so DSA cannot demux them towards the switch's interface for port 0. Traffic from other ports receives a skb_metadata_dst() with the correct port and is demuxed properly. Looking at mtk_poll_rx(), it becomes apparent that this driver uses the skb vlan hwaccel area: union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; as a temporary storage for the VLAN hwaccel tag, or the DSA hwaccel tag. If this is a DSA master it's a DSA hwaccel tag, and finally clears up the skb VLAN hwaccel header. I'm guessing that the problem is the (mis)use of API. skb_vlan_tag_present() looks like this: #define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all) So if both vlan_proto and vlan_tci are zeroes, skb_vlan_tag_present() returns precisely false. I don't know for sure what is the format of the DSA hwaccel tag, but I surely know that lowermost 3 bits of vlan_proto are 0 when receiving from port 0: unsigned int port = vlan_proto & GENMASK(2, 0); If the RX descriptor has no other bits set to non-zero values in RX_DMA_VTAG, then the call to __vlan_hwaccel_put_tag() will not, in fact, make the subsequent skb_vlan_tag_present() return true, because it's implemented like this: static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb->vlan_proto = vlan_proto; skb->vlan_tci = vlan_tci; } What we need to do to fix this problem (assuming this is the problem) is to stop using skb->vlan_all as temporary storage for driver affairs, and just create some local variables that serve the same purpose, but hopefully better. Instead of calling skb_vlan_tag_present(), let's look at a boolean has_hwaccel_tag which we set to true when the RX DMA descriptors have something. Disambiguate based on netdev_uses_dsa() whether this is a VLAN or DSA hwaccel tag, and only call __vlan_hwaccel_put_tag() if we're certain it's a VLAN tag. Arınç confirms that the treatment works, so this validates the assumption. Link: https://lore.kernel.org/netdev/704f3a72-fc9e-714a-db54-272e17612637@arinc9.com/ Fixes: 2d7605a72906 ("net: ethernet: mtk_eth_soc: enable hardware DSA untagging") Reported-by: Arınç ÜNAL Tested-by: Arınç ÜNAL Signed-off-by: Vladimir Oltean Reviewed-by: Felix Fietkau Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 8f61e3b052c6..e3123723522e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, while (done < budget) { unsigned int pktlen, *rxdcsum; + bool has_hwaccel_tag = false; struct net_device *netdev; + u16 vlan_proto, vlan_tci; dma_addr_t dma_addr; u32 hash, reason; int mac = 0; @@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - if (trxd.rxd3 & RX_DMA_VTAG_V2) - __vlan_hwaccel_put_tag(skb, - htons(RX_DMA_VPID(trxd.rxd4)), - RX_DMA_VID(trxd.rxd4)); + if (trxd.rxd3 & RX_DMA_VTAG_V2) { + vlan_proto = RX_DMA_VPID(trxd.rxd4); + vlan_tci = RX_DMA_VID(trxd.rxd4); + has_hwaccel_tag = true; + } } else if (trxd.rxd2 & RX_DMA_VTAG) { - __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)), - RX_DMA_VID(trxd.rxd3)); + vlan_proto = RX_DMA_VPID(trxd.rxd3); + vlan_tci = RX_DMA_VID(trxd.rxd3); + has_hwaccel_tag = true; } } /* When using VLAN untagging in combination with DSA, the * hardware treats the MTK special tag as a VLAN and untags it. */ - if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) { - unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0); + if (has_hwaccel_tag && netdev_uses_dsa(netdev)) { + unsigned int port = vlan_proto & GENMASK(2, 0); if (port < ARRAY_SIZE(eth->dsa_meta) && eth->dsa_meta[port]) skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); - - __vlan_hwaccel_clear_tag(skb); + } else if (has_hwaccel_tag) { + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci); } skb_record_rx_queue(skb, 0); -- cgit v1.2.3 From 2fcde9fe258ec8b88d41def38e43ca4da32c0a9a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 7 Feb 2023 20:31:17 +0200 Subject: net: mscc: ocelot: fix all IPv6 getting trapped to CPU when PTP timestamping is used While running this selftest which usually passes: ~/selftests/drivers/net/dsa# ./local_termination.sh eno0 swp0 TEST: swp0: Unicast IPv4 to primary MAC address [ OK ] TEST: swp0: Unicast IPv4 to macvlan MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, promisc [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, allmulti [ OK ] TEST: swp0: Multicast IPv4 to joined group [ OK ] TEST: swp0: Multicast IPv4 to unknown group [ OK ] TEST: swp0: Multicast IPv4 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv4 to unknown group, allmulti [ OK ] TEST: swp0: Multicast IPv6 to joined group [ OK ] TEST: swp0: Multicast IPv6 to unknown group [ OK ] TEST: swp0: Multicast IPv6 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv6 to unknown group, allmulti [ OK ] if I start PTP timestamping then run it again (debug prints added by me), the unknown IPv6 MC traffic is seen by the CPU port even when it should have been dropped: ~/selftests/drivers/net/dsa# ptp4l -i swp0 -2 -P -m ptp4l[225.410]: selected /dev/ptp1 as PTP clock [ 225.445746] mscc_felix 0000:00:00.5: ocelot_l2_ptp_trap_add: port 0 adding L2 PTP trap [ 225.453815] mscc_felix 0000:00:00.5: ocelot_ipv4_ptp_trap_add: port 0 adding IPv4 PTP event trap [ 225.462703] mscc_felix 
0000:00:00.5: ocelot_ipv4_ptp_trap_add: port 0 adding IPv4 PTP general trap [ 225.471768] mscc_felix 0000:00:00.5: ocelot_ipv6_ptp_trap_add: port 0 adding IPv6 PTP event trap [ 225.480651] mscc_felix 0000:00:00.5: ocelot_ipv6_ptp_trap_add: port 0 adding IPv6 PTP general trap ptp4l[225.488]: port 1: INITIALIZING to LISTENING on INIT_COMPLETE ptp4l[225.488]: port 0: INITIALIZING to LISTENING on INIT_COMPLETE ^C ~/selftests/drivers/net/dsa# ./local_termination.sh eno0 swp0 TEST: swp0: Unicast IPv4 to primary MAC address [ OK ] TEST: swp0: Unicast IPv4 to macvlan MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, promisc [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, allmulti [ OK ] TEST: swp0: Multicast IPv4 to joined group [ OK ] TEST: swp0: Multicast IPv4 to unknown group [ OK ] TEST: swp0: Multicast IPv4 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv4 to unknown group, allmulti [ OK ] TEST: swp0: Multicast IPv6 to joined group [ OK ] TEST: swp0: Multicast IPv6 to unknown group [FAIL] reception succeeded, but should have failed TEST: swp0: Multicast IPv6 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv6 to unknown group, allmulti [ OK ] The PGID_MCIPV6 is configured correctly to not flood to the CPU, I checked that. Furthermore, when I disable back PTP RX timestamping (ptp4l doesn't do that when it exists), packets are RX filtered again as they should be: ~/selftests/drivers/net/dsa# hwstamp_ctl -i swp0 -r 0 [ 218.202854] mscc_felix 0000:00:00.5: ocelot_l2_ptp_trap_del: port 0 removing L2 PTP trap [ 218.212656] mscc_felix 0000:00:00.5: ocelot_ipv4_ptp_trap_del: port 0 removing IPv4 PTP event trap [ 218.222975] mscc_felix 0000:00:00.5: ocelot_ipv4_ptp_trap_del: port 0 removing IPv4 PTP general trap [ 218.233133] mscc_felix 0000:00:00.5: ocelot_ipv6_ptp_trap_del: port 0 removing IPv6 PTP event trap [ 218.242251] mscc_felix 0000:00:00.5: ocelot_ipv6_ptp_trap_del: port 0 removing IPv6 PTP general trap current settings: tx_type 1 rx_filter 12 new settings: tx_type 1 rx_filter 0 ~/selftests/drivers/net/dsa# ./local_termination.sh eno0 swp0 TEST: swp0: Unicast IPv4 to primary MAC address [ OK ] TEST: swp0: Unicast IPv4 to macvlan MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, promisc [ OK ] TEST: swp0: Unicast IPv4 to unknown MAC address, allmulti [ OK ] TEST: swp0: Multicast IPv4 to joined group [ OK ] TEST: swp0: Multicast IPv4 to unknown group [ OK ] TEST: swp0: Multicast IPv4 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv4 to unknown group, allmulti [ OK ] TEST: swp0: Multicast IPv6 to joined group [ OK ] TEST: swp0: Multicast IPv6 to unknown group [ OK ] TEST: swp0: Multicast IPv6 to unknown group, promisc [ OK ] TEST: swp0: Multicast IPv6 to unknown group, allmulti [ OK ] So it's clear that something in the PTP RX trapping logic went wrong. Looking a bit at the code, I can see that there are 4 typos, which populate "ipv4" VCAP IS2 key filter fields for IPv6 keys. VCAP IS2 keys of type OCELOT_VCAP_KEY_IPV4 and OCELOT_VCAP_KEY_IPV6 are handled by is2_entry_set(). OCELOT_VCAP_KEY_IPV4 looks at &filter->key.ipv4, and OCELOT_VCAP_KEY_IPV6 at &filter->key.ipv6. Simply put, when we populate the wrong key field, &filter->key.ipv6 fields "proto.mask" and "proto.value" remain all zeroes (or "don't care"). 
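Side by side, the mismatch fixed by the hunk at the end of this patch looks like this:

    /* buggy: populates key.ipv4 for an OCELOT_VCAP_KEY_IPV6 trap */
    trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
    trap->key.ipv4.proto.mask[0] = 0xff;

    /* intended */
    trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
    trap->key.ipv6.proto.mask[0] = 0xff;
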
So is2_entry_set() will enter the "else" of this "if" condition: if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) and proceed to ignore the "proto" field. The resulting rule will match on all IPv6 traffic, trapping it to the CPU. This is the reason why the local_termination.sh selftest sees it, because control traps are stronger than the PGID_MCIPV6 used for flooding (from the forwarding data path). But the problem is in fact much deeper. We trap all IPv6 traffic to the CPU, but if we're bridged, we set skb->offload_fwd_mark = 1, so software forwarding will not take place and IPv6 traffic will never reach its destination. The fix is simple - correct the typos. I was intentionally inaccurate in the commit message about the breakage occurring when any PTP timestamping is enabled. In fact it only happens when L4 timestamping is requested (HWTSTAMP_FILTER_PTP_V2_EVENT or HWTSTAMP_FILTER_PTP_V2_L4_EVENT). But ptp4l requests a larger RX timestamping filter than it needs for "-2": HWTSTAMP_FILTER_PTP_V2_EVENT. I wanted people skimming through git logs to not think that the bug doesn't affect them because they only use ptp4l in L2 mode. Fixes: 96ca08c05838 ("net: mscc: ocelot: set up traps for PTP packets") Signed-off-by: Vladimir Oltean Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230207183117.1745754-1-vladimir.oltean@nxp.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mscc/ocelot_ptp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index 1a82f10c8853..2180ae94c744 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -335,8 +335,8 @@ static void ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV6; - trap->key.ipv4.proto.value[0] = IPPROTO_UDP; - trap->key.ipv4.proto.mask[0] = 0xff; + trap->key.ipv6.proto.value[0] = IPPROTO_UDP; + trap->key.ipv6.proto.mask[0] = 0xff; trap->key.ipv6.dport.value = PTP_EV_PORT; trap->key.ipv6.dport.mask = 0xffff; } @@ -355,8 +355,8 @@ static void ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV6; - trap->key.ipv4.proto.value[0] = IPPROTO_UDP; - trap->key.ipv4.proto.mask[0] = 0xff; + trap->key.ipv6.proto.value[0] = IPPROTO_UDP; + trap->key.ipv6.proto.mask[0] = 0xff; trap->key.ipv6.dport.value = PTP_GEN_PORT; trap->key.ipv6.dport.mask = 0xffff; } -- cgit v1.2.3