Diffstat (limited to 'drivers/net/ethernet')
42 files changed, 374 insertions, 212 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 18956e7604a3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
 		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
 		if (rc)
 			dev_err(&adapter->pdev->dev, "Device reset failed\n");
+		/* stop submitting admin commands on a device that was reset */
+		ena_com_set_admin_running_state(adapter->ena_dev, false);
 	}
 
 	ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
 
 	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
 
+	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+		return 0;
+
 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 		ena_down(adapter);
 
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 		ena_down(adapter);
 
 	/* Stop the device from sending AENQ events (in case reset flag is set
-	 * and device is up, ena_close already reset the device
-	 * In case the reset flag is set and the device is up, ena_down()
-	 * already perform the reset, so it can be skipped.
+	 * and device is up, ena_down() already reset the device.
 	 */
 	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
 	ena_com_abort_admin_commands(ena_dev);
 	ena_com_wait_for_abort_completion(ena_dev);
 	ena_com_admin_destroy(ena_dev);
-	ena_com_mmio_reg_read_request_destroy(ena_dev);
 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+	ena_com_mmio_reg_read_request_destroy(ena_dev);
 err:
 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
 	ena_com_rss_destroy(ena_dev);
 err_free_msix:
 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
+	/* stop submitting admin commands on a device that was reset */
+	ena_com_set_admin_running_state(ena_dev, false);
 	ena_free_mgmnt_irq(adapter);
 	ena_disable_msix(adapter);
 err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
 
 	cancel_work_sync(&adapter->reset_task);
 
-	unregister_netdev(netdev);
-
-	/* If the device is running then we want to make sure the device will be
-	 * reset to make sure no more events will be issued by the device.
-	 */
-	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
 	rtnl_lock();
 	ena_destroy_device(adapter, true);
 	rtnl_unlock();
 
+	unregister_netdev(netdev);
+
 	free_netdev(netdev);
 
 	ena_com_rss_destroy(ena_dev);
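
Note: the ena hunks above are ordering fixes. The device reset is issued while the MMIO register-read machinery still exists, and the admin queue is flagged as not running right after any reset, so no further admin commands reach a freshly reset device. A condensed sketch of the intended order, reusing the ena_com calls visible in the hunks (not the driver's literal code):

    static void example_ena_teardown(struct ena_com_dev *ena_dev)
    {
        ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
        /* stop submitting admin commands on a device that was reset */
        ena_com_set_admin_running_state(ena_dev, false);
        /* the reset path reads registers via MMIO; destroy this last */
        ena_com_mmio_reg_read_request_destroy(ena_dev);
    }
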
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 521873642339..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR	2
 #define DRV_MODULE_VER_MINOR	0
-#define DRV_MODULE_VER_SUBMINOR 1
+#define DRV_MODULE_VER_SUBMINOR 2
 
 #define DRV_MODULE_NAME		"ena"
 #ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b4fc0ed5bce8..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
 
 			prop = of_get_property(nd, "tpe-link-test?", NULL);
 			if (!prop)
-				goto no_link_test;
+				goto node_put;
 
 			if (strcmp(prop, "true")) {
 				printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
 				       "to ecd@skynet.be\n");
 				auxio_set_lte(AUXIO_LTE_ON);
 			}
+node_put:
+			of_node_put(nd);
 no_link_test:
 			lp->auto_select = 1;
 			lp->tpe = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index be1506169076..0de487a8f0eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define PMF_DMAE_C(bp)		(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
 				 E1HVN_MAX)
 
+/* Following is the DMAE channel number allocation for the clients.
+ *   MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ *   Driver: 0-3 and 8-11 (for PF dmae operations)
+ *           4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C		13	/* Channel for FW DMAE operations */
+
 /* PCIE link and speed */
 #define PCICFG_LINK_WIDTH	0x1f00000
 #define PCICFG_LINK_WIDTH_SHIFT	20
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f4d2c8da21a..a9eaaf3e73a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
 	rdata->path_id		= BP_PATH(bp);
 	rdata->network_cos_mode	= start_params->network_cos_mode;
+	rdata->dmae_cmd_id	= BNX2X_FW_DMAE_C;
 
 	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
 	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dd85d790f638..d4c300117529 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	} else {
 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
 			if (dev->features & NETIF_F_RXCSUM)
-				cpr->rx_l4_csum_errors++;
+				bnapi->cp_ring.rx_l4_csum_errors++;
 		}
 	}
 
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	return rc;
 }
 
+static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
+				       u32 ring_id, u32 *prod, u32 *cons)
+{
+	struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_dbg_ring_info_get_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
+	req.ring_type = ring_type;
+	req.fw_ring_id = cpu_to_le32(ring_id);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		*prod = le32_to_cpu(resp->producer_index);
+		*cons = le32_to_cpu(resp->consumer_index);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
 			bnxt_queue_sp_work(bp);
 		}
 	}
+
+	if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
+		bnxt_queue_sp_work(bp);
+	}
bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 	bnxt_rtnl_unlock_sp(bp);
 }
 
+static void bnxt_chk_missed_irq(struct bnxt *bp)
+{
+	int i;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		u32 fw_ring_id;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		for (j = 0; j < 2; j++) {
+			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+			u32 val[2];
+
+			if (!cpr2 || cpr2->has_more_work ||
+			    !bnxt_has_work(bp, cpr2))
+				continue;
+
+			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
+				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
+				continue;
+			}
+			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
+			bnxt_dbg_hwrm_ring_info_get(bp,
+				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
+				fw_ring_id, &val[0], &val[1]);
+			cpr->missed_irqs++;
+		}
+	}
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_tc_flow_stats_work(bp);
 
+	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
+		bnxt_chk_missed_irq(bp);
+
 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
 	 * must be the last functions to be called before exiting.
 	 */
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	bnxt_hwrm_func_qcfg(bp);
+	bnxt_hwrm_vnic_qcaps(bp);
 	bnxt_hwrm_port_led_qcaps(bp);
 	bnxt_ethtool_init(bp);
 	bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
 	}
 
-	bnxt_hwrm_vnic_qcaps(bp);
 	if (bnxt_rfs_supported(bp)) {
 		dev->hw_features |= NETIF_F_NTUPLE;
 		if (bnxt_rfs_capable(bp)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 498b373c992d..9e99d4ab3e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
 	u8			had_work_done:1;
 	u8			has_more_work:1;
 
+	u32			last_cp_raw_cons;
+
 	struct bnxt_coal	rx_ring_coal;
 	u64			rx_packets;
 	u64			rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
 	dma_addr_t		hw_stats_map;
 	u32			hw_stats_ctx_id;
 	u64			rx_l4_csum_errors;
+	u64			missed_irqs;
 
 	struct bnxt_ring_struct	cp_ring_struct;
 
@@ -1527,6 +1530,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 #define BNXT_FLOW_STATS_SP_EVENT	15
 #define BNXT_UPDATE_PHY_SP_EVENT	16
+#define BNXT_RING_COAL_NOW_SP_EVENT	17
 
 	struct bnxt_hw_resc	hw_resc;
 	struct bnxt_pf_info	pf;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 48078564f025..6cc69a58478a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
 	return rc;
 }
 
-#define BNXT_NUM_STATS	21
+#define BNXT_NUM_STATS	22
 
 #define BNXT_RX_STATS_ENTRY(counter)	\
 	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
 		for (k = 0; k < stat_fields; j++, k++)
 			buf[j] = le64_to_cpu(hw_stats[k]);
 		buf[j++] = cpr->rx_l4_csum_errors;
+		buf[j++] = cpr->missed_irqs;
 
 		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
 			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 			buf += ETH_GSTRING_LEN;
 			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
 			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: missed_irqs", i);
+			buf += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
 			strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
 	record->asic_state = 0;
 	strlcpy(record->system_name, utsname()->nodename,
 		sizeof(record->system_name));
-	record->year = cpu_to_le16(tm.tm_year);
-	record->month = cpu_to_le16(tm.tm_mon);
+	record->year = cpu_to_le16(tm.tm_year + 1900);
+	record->month = cpu_to_le16(tm.tm_mon + 1);
 	record->day = cpu_to_le16(tm.tm_mday);
 	record->hour = cpu_to_le16(tm.tm_hour);
 	record->minute = cpu_to_le16(tm.tm_min);
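
Note: the bnxt_ethtool.c hunk just above fixes a classic struct tm pitfall: tm_year counts years since 1900 and tm_mon runs 0-11, in the kernel's struct tm just as in userspace <time.h>, so both need offsets before being stored as calendar values. A standalone userspace illustration (not driver code):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        time_t now = time(NULL);
        struct tm tm;

        gmtime_r(&now, &tm);
        /* without the offsets this would print e.g. 118-10 for Nov 2018 */
        printf("%04d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1);
        return 0;
    }
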
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index beee61292d5e..b59b382d34f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
 	if (ulp_id == BNXT_ROCE_ULP) {
 		unsigned int max_stat_ctxs;
 
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			return -EOPNOTSUPP;
+
 		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
 		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
 		    bp->num_stat_ctxs == max_stat_ctxs)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..432c3b867084 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 {
 	struct tg3 *tp = netdev_priv(dev);
 	int i, irq_sync = 0, err = 0;
+	bool reset_phy = false;
 
 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		err = tg3_restart_hw(tp, false);
+		/* Reset PHY to avoid PHY lock up */
+		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			reset_phy = true;
+
+		err = tg3_restart_hw(tp, reset_phy);
 		if (!err)
 			tg3_netif_start(tp);
 	}
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 {
 	struct tg3 *tp = netdev_priv(dev);
 	int err = 0;
+	bool reset_phy = false;
 
 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
 		tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
 		if (netif_running(dev)) {
 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-			err = tg3_restart_hw(tp, false);
+			/* Reset PHY to avoid PHY lock up */
+			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+			    tg3_asic_rev(tp) == ASIC_REV_5720)
+				reset_phy = true;
+
+			err = tg3_restart_hw(tp, reset_phy);
 			if (!err)
 				tg3_netif_start(tp);
 		}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768f584f8392..88f8a8fa93cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	bool if_up = netif_running(nic->netdev);
 	struct bpf_prog *old_prog;
 	bool bpf_attached = false;
+	int ret = 0;
 
 	/* For now just support only the usual MTU sized frames */
 	if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	if (nic->xdp_prog) {
 		/* Attach BPF program */
 		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
-		if (!IS_ERR(nic->xdp_prog))
+		if (!IS_ERR(nic->xdp_prog)) {
 			bpf_attached = true;
+		} else {
+			ret = PTR_ERR(nic->xdp_prog);
+			nic->xdp_prog = NULL;
+		}
 	}
 
 	/* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 		netif_trans_update(nic->netdev);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
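
Note: the nicvf change above completes the error handling around bpf_prog_add(), which returns an ERR_PTR() cookie rather than NULL on failure, so the caller must translate it with PTR_ERR() and must not cache the error pointer. A minimal sketch of the shape (example_attach is hypothetical; the ERR_PTR helpers are the real <linux/err.h> API):

    #include <linux/err.h>
    #include <linux/bpf.h>

    static int example_attach(struct bpf_prog **slot, struct bpf_prog *prog, int cnt)
    {
        prog = bpf_prog_add(prog, cnt);
        if (IS_ERR(prog)) {
            *slot = NULL;           /* don't leave an ERR_PTR behind */
            return PTR_ERR(prog);   /* propagate the real errno */
        }
        *slot = prog;
        return 0;
    }
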
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 187a249ff2d1..fcaf18fa3904 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 	if (!sq->dmem.base)
 		return;
 
-	if (sq->tso_hdrs)
+	if (sq->tso_hdrs) {
 		dma_free_coherent(&nic->pdev->dev,
 				  sq->dmem.q_len * TSO_HEADER_SIZE,
 				  sq->tso_hdrs, sq->tso_hdrs_phys);
+		sq->tso_hdrs = NULL;
+	}
 
 	/* Free pending skbs in the queue */
 	smp_rmb();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 75c1c5ed2387..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,7 +67,6 @@ config CHELSIO_T3
 config CHELSIO_T4
 	tristate "Chelsio Communications T4/T5/T6 Ethernet support"
 	depends on PCI && (IPV6 || IPV6=n)
-	depends on THERMAL || !THERMAL
 	select FW_LOADER
 	select MDIO
 	select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 78e5d17a1d5f..91d8a885deba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
-ifdef CONFIG_THERMAL
-cxgb4-objs += cxgb4_thermal.o
-endif
+cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a46926016a..d49db46254cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5863,7 +5863,7 @@ fw_attach_fail:
 	if (!is_t4(adapter->params.chip))
 		cxgb4_ptp_init(adapter);
 
-	if (IS_ENABLED(CONFIG_THERMAL) &&
+	if (IS_REACHABLE(CONFIG_THERMAL) &&
 	    !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
 		cxgb4_thermal_init(adapter);
 
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
 	if (!is_t4(adapter->params.chip))
 		cxgb4_ptp_stop(adapter);
 
-	if (IS_ENABLED(CONFIG_THERMAL))
+	if (IS_REACHABLE(CONFIG_THERMAL))
 		cxgb4_thermal_remove(adapter);
 
 	/* If we allocated filters, free up state associated with any
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ceec467f590d..949103db8a8a 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
 
 			u64_stats_update_begin(&port->tx_stats_syncp);
 			port->tx_frag_stats[nfrags]++;
-			u64_stats_update_end(&port->ir_stats_syncp);
+			u64_stats_update_end(&port->tx_stats_syncp);
 		}
 	}
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 570caeb8ee9e..084f24daf2b5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
 	struct net_device *netdev = dev_id;
 	struct ftmac100 *priv = netdev_priv(netdev);
 
-	if (likely(netif_running(netdev))) {
-		/* Disable interrupts for polling */
-		ftmac100_disable_all_int(priv);
+	/* Disable interrupts for polling */
+	ftmac100_disable_all_int(priv);
+	if (likely(netif_running(netdev)))
 		napi_schedule(&priv->napi);
-	}
 
 	return IRQ_HANDLED;
 }
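
Note: the Kconfig/Makefile/cxgb4_main.c trio above replaces the "depends on THERMAL || !THERMAL" workaround with IS_REACHABLE(). IS_ENABLED(CONFIG_THERMAL) is true for both =y and =m, while IS_REACHABLE() is additionally false when THERMAL=m but the caller is built-in, i.e. exactly when the thermal symbols cannot be linked. A minimal sketch (example_thermal_init is a hypothetical stand-in):

    #include <linux/kconfig.h>

    static void example_thermal_init(void *adapter) { }

    static void maybe_init_thermal(void *adapter)
    {
        /* compiles away unless the thermal symbols are linkable */
        if (IS_REACHABLE(CONFIG_THERMAL))
            example_thermal_init(adapter);
    }
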
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c9d5d0a7fbf1..c0203a0d5e3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 		for (j = 0; j < rx_pool->size; j++) {
 			if (rx_pool->rx_buff[j].skb) {
-				dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
-				rx_pool->rx_buff[i].skb = NULL;
+				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+				rx_pool->rx_buff[j].skb = NULL;
 			}
 		}
 
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
 		return 0;
 	}
 
-	mutex_lock(&adapter->reset_lock);
-
 	if (adapter->state != VNIC_CLOSED) {
 		rc = ibmvnic_login(netdev);
-		if (rc) {
-			mutex_unlock(&adapter->reset_lock);
+		if (rc)
 			return rc;
-		}
 
 		rc = init_resources(adapter);
 		if (rc) {
 			netdev_err(netdev, "failed to initialize resources\n");
 			release_resources(adapter);
-			mutex_unlock(&adapter->reset_lock);
 			return rc;
 		}
 	}
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
 	rc = __ibmvnic_open(netdev);
 	netif_carrier_on(netdev);
 
-	mutex_unlock(&adapter->reset_lock);
-
 	return rc;
 }
 
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
 		return 0;
 	}
 
-	mutex_lock(&adapter->reset_lock);
 	rc = __ibmvnic_close(netdev);
 	ibmvnic_cleanup(netdev);
-	mutex_unlock(&adapter->reset_lock);
 
 	return rc;
 }
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
 	u64 old_num_rx_queues, old_num_tx_queues;
+	u64 old_num_rx_slots, old_num_tx_slots;
 	struct net_device *netdev = adapter->netdev;
 	int i, rc;
 
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 	old_num_rx_queues = adapter->req_rx_queues;
 	old_num_tx_queues = adapter->req_tx_queues;
+	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
+	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
 
 	ibmvnic_cleanup(netdev);
 
@@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 			if (rc)
 				return rc;
 		} else if (adapter->req_rx_queues != old_num_rx_queues ||
-			   adapter->req_tx_queues != old_num_tx_queues) {
-			adapter->map_id = 1;
+			   adapter->req_tx_queues != old_num_tx_queues ||
+			   adapter->req_rx_add_entries_per_subcrq !=
+			   old_num_rx_slots ||
+			   adapter->req_tx_entries_per_subcrq !=
+			   old_num_tx_slots) {
 			release_rx_pools(adapter);
 			release_tx_pools(adapter);
-			rc = init_rx_pools(netdev);
-			if (rc)
-				return rc;
-			rc = init_tx_pools(netdev);
-			if (rc)
-				return rc;
-
 			release_napi(adapter);
-			rc = init_napi(adapter);
+			release_vpd_data(adapter);
+
+			rc = init_resources(adapter);
 			if (rc)
 				return rc;
+
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 		adapter->state = VNIC_PROBED;
 		return 0;
 	}
-	/* netif_set_real_num_xx_queues needs to take rtnl lock here
-	 * unless wait_for_reset is set, in which case the rtnl lock
-	 * has already been taken before initializing the reset
-	 */
-	if (!adapter->wait_for_reset) {
-		rtnl_lock();
-		rc = init_resources(adapter);
-		rtnl_unlock();
-	} else {
-		rc = init_resources(adapter);
-	}
+
+	rc = init_resources(adapter);
 	if (rc)
 		return rc;
 
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
 	struct ibmvnic_rwi *rwi;
 	struct ibmvnic_adapter *adapter;
 	struct net_device *netdev;
+	bool we_lock_rtnl = false;
 	u32 reset_state;
 	int rc = 0;
 
 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 	netdev = adapter->netdev;
 
-	mutex_lock(&adapter->reset_lock);
+	/* netif_set_real_num_xx_queues needs to take rtnl lock here
+	 * unless wait_for_reset is set, in which case the rtnl lock
+	 * has already been taken before initializing the reset
+	 */
+	if (!adapter->wait_for_reset) {
+		rtnl_lock();
+		we_lock_rtnl = true;
+	}
 	reset_state = adapter->state;
 
 	rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 	if (rc) {
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
-		mutex_unlock(&adapter->reset_lock);
-		return;
 	}
 
 	adapter->resetting = false;
-	mutex_unlock(&adapter->reset_lock);
+	if (we_lock_rtnl)
+		rtnl_unlock();
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->reset_lock);
 	mutex_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
 	adapter->state = VNIC_REMOVING;
-	unregister_netdev(netdev);
-	mutex_lock(&adapter->reset_lock);
+	rtnl_lock();
+	unregister_netdevice(netdev);
 
 	release_resources(adapter);
 	release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
 
 	adapter->state = VNIC_REMOVED;
 
-	mutex_unlock(&adapter->reset_lock);
+	rtnl_unlock();
 	device_remove_file(&dev->dev, &dev_attr_failover);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 18103b811d4d..99c4f8d331ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
 	enum ibmvnic_reset_reason reset_reason;
-	struct mutex reset_lock, rwi_lock;
+	struct mutex rwi_lock;
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
 	bool resetting;
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8c5ba4b81fb7..2d4d10a017e5 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
 	err = register_netdev(net_dev);
 	if (err)
 		goto err_unprepare_clk;
-	return err;
+
+	return 0;
 
 err_unprepare_clk:
 	clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
 err_uninit_dma:
 	xrx200_hw_cleanup(priv);
 
-	return 0;
+	return err;
 }
 
 static int xrx200_remove(struct platform_device *pdev)
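
Note: the lantiq_xrx200 hunk above is the canonical probe-unwind fix: the success path must return 0 and the failure path must return the saved errno, not the other way around. A self-contained sketch of the pattern, with stub helpers:

    static int step_one(void) { return 0; }
    static int step_two(void) { return -1; }
    static void undo_one(void) { }

    static int example_probe(void)
    {
        int err;

        err = step_one();
        if (err)
            return err;

        err = step_two();
        if (err)
            goto err_undo_one;

        return 0;               /* success: always 0 */

    err_undo_one:
        undo_one();
        return err;             /* failure: the saved errno */
    }
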
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 3ba672e9e353..e5397c8197b9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
 	if (state->interface != PHY_INTERFACE_MODE_NA &&
 	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
 	    state->interface != PHY_INTERFACE_MODE_SGMII &&
-	    state->interface != PHY_INTERFACE_MODE_2500BASEX &&
 	    !phy_interface_mode_is_8023z(state->interface) &&
 	    !phy_interface_mode_is_rgmii(state->interface)) {
 		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
 	/* Asymmetric pause is unsupported */
 	phylink_set(mask, Pause);
 
-	/* We cannot use 1Gbps when using the 2.5G interface. */
-	if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
-		phylink_set(mask, 2500baseT_Full);
-		phylink_set(mask, 2500baseX_Full);
-	} else {
-		phylink_set(mask, 1000baseT_Full);
-		phylink_set(mask, 1000baseX_Full);
-	}
+	/* Half-duplex at speeds higher than 100Mbit is unsupported */
+	phylink_set(mask, 1000baseT_Full);
+	phylink_set(mask, 1000baseX_Full);
 
 	if (!phy_interface_mode_is_8023z(state->interface)) {
 		/* 10M and 100M are only supported in non-802.3z mode */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index deef5a998985..9af34e03892c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
 static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
 				  int align, u32 skip_mask, u32 *puid)
 {
-	u32 uid;
+	u32 uid = 0;
 	u32 res;
 	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
 	struct mlx4_zone_entry *curr_node;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ebcd2778eeb3..23f1b5b512c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -540,8 +540,8 @@ struct slave_list {
 struct resource_allocator {
 	spinlock_t alloc_lock; /* protect quotas */
 	union {
-		int res_reserved;
-		int res_port_rsvd[MLX4_MAX_PORTS];
+		unsigned int res_reserved;
+		unsigned int res_port_rsvd[MLX4_MAX_PORTS];
 	};
 	union {
 		int res_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2e84f10f59ba..1a11bc0e1612 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
 				     buf);
 
+		(*mpt_entry)->lkey = 0;
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d7fbd5b6ac95..118324802926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -569,6 +569,7 @@ struct mlx5e_rq {
 
 	unsigned long          state;
 	int                    ix;
+	unsigned int           hw_mtu;
 
 	struct net_dim         dim; /* Dynamic Interrupt Moderation */
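
Note: among the hunks above, the mlx4.h switch of the reserved-resource counters from int to unsigned int is worth a remark. The likely motivation (an assumption inferred from the type change, not stated in this page) is that signed overflow is undefined behavior and trips UBSAN, while unsigned arithmetic wraps with defined semantics. A userspace illustration:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int u = UINT_MAX;

        u += 1;                 /* defined: wraps to 0 */
        printf("%u\n", u);

        /* int s = INT_MAX; s += 1;  <- undefined behavior, UBSAN splat */
        return 0;
    }
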
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 023dc4bccd28..4a37713023be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 
 	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
 	*speed = mlx5e_port_ptys2speed(eth_proto_oper);
-	if (!(*speed)) {
-		mlx5_core_warn(mdev, "cannot get port speed\n");
+	if (!(*speed))
 		err = -EINVAL;
-	}
 
 	return err;
 }
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
 	case 40000:
 		if (!write)
 			*fec_policy = MLX5_GET(pplm_reg, pplm,
-					       fec_override_cap_10g_40g);
+					       fec_override_admin_10g_40g);
 		else
 			MLX5_SET(pplm_reg, pplm,
 				 fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
 	case 10000:
 	case 40000:
 		*fec_cap = MLX5_GET(pplm_reg, pplm,
-				    fec_override_admin_10g_40g);
+				    fec_override_cap_10g_40g);
 		break;
 	case 25000:
 		*fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
 
 int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 {
+	u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
 	bool fec_mode_not_supp_in_speed = false;
-	u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
 	u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
 	u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
 	int sz = MLX5_ST_SZ_BYTES(pplm_reg);
-	u32 current_fec_speed;
+	u8 fec_policy_auto = 0;
 	u8 fec_caps = 0;
 	int err;
 	int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 	if (err)
 		return err;
 
-	err = mlx5e_port_linkspeed(dev, &current_fec_speed);
-	if (err)
-		return err;
+	MLX5_SET(pplm_reg, out, local_port, 1);
 
-	memset(in, 0, sz);
-	MLX5_SET(pplm_reg, in, local_port, 1);
-	for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+	for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
 		mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
-		/* policy supported for link speed */
-		if (!!(fec_caps & fec_policy)) {
-			mlx5e_fec_admin_field(in, &fec_policy, 1,
+		/* policy supported for link speed, or policy is auto */
+		if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
+			mlx5e_fec_admin_field(out, &fec_policy, 1,
 					      fec_supported_speeds[i]);
 		} else {
-			if (fec_supported_speeds[i] == current_fec_speed)
-				return -EOPNOTSUPP;
-			mlx5e_fec_admin_field(in, &no_fec_policy, 1,
-					      fec_supported_speeds[i]);
+			/* turn off FEC if supported. Else, leave it the same */
+			if (fec_caps & fec_policy_nofec)
+				mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
+						      fec_supported_speeds[i]);
 			fec_mode_not_supp_in_speed = true;
 		}
 	}
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 			      "FEC policy 0x%x is not supported for some speeds",
 			      fec_policy);
 
-	return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+	return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 	int err;
 
 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
-	if (err)
+	if (err) {
+		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
 		return 0;
+	}
 
 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3e770abfd802..25c1c4f96841 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
 					     Autoneg);
 
-	err = get_fec_supported_advertised(mdev, link_ksettings);
-	if (err)
+	if (get_fec_supported_advertised(mdev, link_ksettings))
 		netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
 			   __func__, err);
 
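
Note: the en/port.c FEC rework above switches mlx5e_set_fec_mode() to a read-modify-write of the queried PPLM image (the final access_reg call now passes the same "out" buffer as input), so fields that are not explicitly set keep their current values instead of being zeroed. A generic sketch of that shape, with hypothetical reg_query/reg_set_field/reg_write helpers:

    static int reg_query(unsigned char *img, int len);
    static void reg_set_field(unsigned char *img, int field, unsigned int val);
    static int reg_write(const unsigned char *img, int len);

    static int set_one_field(int field, unsigned int val)
    {
        unsigned char img[64];
        int err;

        err = reg_query(img, sizeof(img));  /* snapshot current state */
        if (err)
            return err;

        reg_set_field(img, field, val);     /* touch only this field */
        return reg_write(img, sizeof(img)); /* write the snapshot back */
    }
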
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e..871313d6b34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix      = c->ix;
 	rq->mdev    = mdev;
+	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 	int err;
 	u32 i;
 
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+	if (err)
+		return err;
+
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
 			       &cq->wq_ctrl);
 	if (err)
 		return err;
 
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
 	mcq->cqe_sz     = 64;
 	mcq->set_ci_db  = cq->wq_ctrl.db.db;
 	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int eqn;
 	int err;
 
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+	if (err)
+		return err;
+
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		sizeof(u64) * cq->wq_ctrl.buf.npages;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
 	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
 	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
 	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	int err;
 	int eqn;
 
+	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+	if (err)
+		return err;
+
 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
 	if (!c)
 		return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->xdp      = !!params->xdp_prog;
 	c->stats    = &priv->channel_stats[ix].ch;
 
-	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
 	c->irq_desc = irq_to_desc(irq);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
 	return 0;
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 
 	return 0;
 }
+#endif
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
 {
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
 				    set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	}
 
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 
-		reset = reset && (ppw_old != ppw_new);
+		reset = reset && (is_linear || (ppw_old != ppw_new));
 	}
 
 	if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	    FT_CAP(modify_root) &&
 	    FT_CAP(identified_miss_table_mode) &&
 	    FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
 		netdev->hw_features      |= NETIF_F_HW_TC;
+#endif
 #ifdef CONFIG_MLX5_EN_ARFS
 		netdev->hw_features	 |= NETIF_F_NTUPLE;
 #endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
 	const struct mlx5e_profile *profile;
+	int max_nch;
 	int err;
 
 	profile = priv->profile;
 	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
+	/* max number of channels may have changed */
+	max_nch = mlx5e_get_max_num_channels(priv->mdev);
+	if (priv->channels.params.num_channels > max_nch) {
+		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+		priv->channels.params.num_channels = max_nch;
+		mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+					      MLX5E_INDIR_RQT_SIZE, max_nch);
+	}
+
 	err = profile->init_tx(priv);
 	if (err)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 79638dcbae78..16985ca3248d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	u32 frag_size;
 	bool consumed;
 
+	/* Check packet size. Note LRO doesn't use linear SKB */
+	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+		rq->stats->oversize_pkts_sw_drop++;
+		return NULL;
+	}
+
 	va             = page_address(di->page) + head_offset;
 	data           = va + rx_headroom;
 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
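
Note: the en_rx.c guard above drops CQEs whose byte count exceeds the RQ's hw_mtu (cached in mlx5e_alloc_rq() earlier in this patch) before any linear-SKB sizing math runs, and accounts the event in the new oversize_pkts_sw_drop counter. A simplified stand-in for the check (types are illustrative, not the mlx5e ones):

    struct rx_ctx {
        unsigned int hw_mtu;
        unsigned long long oversize_drops;
    };

    static int rx_len_ok(struct rx_ctx *rx, unsigned int bcnt)
    {
        if (bcnt > rx->hw_mtu) {
            rx->oversize_drops++;   /* surfaces in ethtool -S */
            return 0;               /* caller drops the packet */
        }
        return 1;
    }
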
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
 	return 1;
 }
 
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
 struct mlx5ehdr {
 	__be32 version;
 	__be64 magic;
-	char   text[ETH_GSTRING_LEN];
 };
 
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+			     sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
 static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 {
 	struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	struct ethhdr *ethh;
 	struct udphdr *udph;
 	struct iphdr *iph;
-	int    datalen, iplen;
-
-	datalen = MLX5E_TEST_PKT_SIZE -
-		  (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+	int    iplen;
 
 	skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
 	if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	/* Fill UDP header */
 	udph->source = htons(9);
 	udph->dest = htons(9); /* Discard Protocol */
-	udph->len = htons(datalen + sizeof(struct udphdr));
+	udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
 	udph->check = 0;
 
 	/* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	iph->ttl = 32;
 	iph->version = 4;
 	iph->protocol = IPPROTO_UDP;
-	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+		sizeof(struct mlx5ehdr);
 	iph->tot_len = htons(iplen);
 	iph->frag_off = 0;
 	iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 	mlxh = skb_put(skb, sizeof(*mlxh));
 	mlxh->version = 0;
 	mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
-	strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
-	datalen -= sizeof(*mlxh);
-	skb_put_zero(skb, datalen);
 
 	skb->csum = 0;
 	skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e55b9c27ffc..3e99d0728b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_wqe_err   += rq_stats->wqe_err;
 		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
 		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
 		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
 		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 77f74ce11280..3f8e870ef4c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler_cqes;
 	u64 rx_mpwqe_filler_strides;
+	u64 rx_oversize_pkts_sw_drop;
 	u64 rx_buff_alloc_err;
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
 	u64 wqe_err;
 	u64 mpwqe_filler_cqes;
 	u64 mpwqe_filler_strides;
+	u64 oversize_pkts_sw_drop;
 	u64 buff_alloc_err;
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
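
Note: the en_stats.{c,h} hunks above show the usual three-point wiring for a new mlx5e software counter: a per-RQ field bumped on the datapath (see the en_rx.c hunk earlier), a device-wide aggregate it folds into, and a descriptor entry so it appears in ethtool -S. A generic miniature of the same shape:

    struct ring_stats { unsigned long long oversize_drops; };
    struct sw_stats   { unsigned long long rx_oversize_drops; };

    /* 2) fold per-ring counters into the device-wide aggregate */
    static void fold_stats(struct sw_stats *s, const struct ring_stats *r)
    {
        s->rx_oversize_drops += r->oversize_drops;
    }

    /* 3) describe the counter once so a stats dump can name it */
    static const char * const stat_names[] = {
        "rx_oversize_pkts_sw_drop",
    };
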
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 608025ca5c04..fca6f4132c91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 					 inner_headers);
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key =
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  FLOW_DISSECTOR_KEY_BASIC,
 						  f->key);
-		struct flow_dissector_key_eth_addrs *mask =
+		struct flow_dissector_key_basic *mask =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  FLOW_DISSECTOR_KEY_BASIC,
 						  f->mask);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+			 ntohs(mask->n_proto));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+			 ntohs(key->n_proto));
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     dmac_47_16),
-				mask->dst);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     dmac_47_16),
-				key->dst);
-
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     smac_47_16),
-				mask->src);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     smac_47_16),
-				key->src);
-
-		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+		if (mask->n_proto)
 			*match_level = MLX5_MATCH_L2;
 	}
 
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 			*match_level = MLX5_MATCH_L2;
 		}
-	} else {
+	} else if (*match_level != MLX5_MATCH_NONE) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+		*match_level = MLX5_MATCH_L2;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		}
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_dissector_key_eth_addrs *key =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
 						  f->key);
-		struct flow_dissector_key_basic *mask =
+		struct flow_dissector_key_eth_addrs *mask =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
 						  f->mask);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-			 ntohs(mask->n_proto));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-			 ntohs(key->n_proto));
 
-		if (mask->n_proto)
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+					     dmac_47_16),
+				mask->dst);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+					     dmac_47_16),
+				key->dst);
+
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+					     smac_47_16),
+				mask->src);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+					     smac_47_16),
+				key->src);
+
+		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
 			*match_level = MLX5_MATCH_L2;
 	}
 
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 			/* the HW doesn't need L3 inline to match on frag=no */
 			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
-				*match_level = MLX5_INLINE_MODE_L2;
+				*match_level = MLX5_MATCH_L2;
 	/* ***  L2 attributes parsing up to here *** */
 			else
-				*match_level = MLX5_INLINE_MODE_IP;
+				*match_level = MLX5_MATCH_L3;
 		}
 	}
 
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
 		return -EOPNOTSUPP;
 
-	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+	if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "current firmware doesn't support split rule for port mirroring");
 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 515e3d6de051..5a22c5874f3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
 };
 
 static const struct rhashtable_params rhash_sa = {
-	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
-	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+	/* Keep out "cmd" field from the key as it's
+	 * value is not constant during the lifetime
+	 * of the key object.
+	 */
+	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+		   FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
+		      FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
 	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
 	.automatic_shrinking = true,
 	.min_size = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index b59953daf8b4..11dabd62e2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
 
 	netif_carrier_off(epriv->netdev);
 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
-	mlx5i_uninit_underlay_qp(epriv);
 	mlx5e_deactivate_priv_channels(epriv);
 	mlx5e_close_channels(&epriv->channels);
+	mlx5i_uninit_underlay_qp(epriv);
 unlock:
 	mutex_unlock(&epriv->state_lock);
 	return 0;
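
Note: the fpga/ipsec.c fix above is a good rhashtable reminder: the hash key must be byte-stable for the object's lifetime, so a mutable leading field is sliced out of the key by shrinking key_len and advancing key_offset past it. A simplified sketch with stand-in types (the rhashtable_params fields are the real API):

    #include <linux/rhashtable.h>
    #include <linux/stddef.h>

    struct hw_sa_v1 {
        unsigned int cmd;       /* mutates over the object's lifetime */
        unsigned char sa[48];   /* stable part - the real key */
    };

    struct sa_ctx {
        struct hw_sa_v1 hw_sa;
        struct rhash_head hash;
    };

    static const struct rhashtable_params example_params = {
        .key_len     = sizeof(struct hw_sa_v1) - sizeof(unsigned int),
        .key_offset  = offsetof(struct sa_ctx, hw_sa) + sizeof(unsigned int),
        .head_offset = offsetof(struct sa_ctx, hash),
        .automatic_shrinking = true,
        .min_size    = 1,
    };
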
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8e8fa823d611..69966dfc6e3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 		    struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-		    bool enable, u8 prio, u8 tc,
+		    bool app_tlv, bool enable, u8 prio, u8 tc,
 		    enum dcbx_protocol_type type,
 		    enum qed_pci_personality personality)
 {
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 		p_data->arr[type].dont_add_vlan0 = true;
 
 	/* QM reconf data */
-	if (p_hwfn->hw_info.personality == personality)
+	if (app_tlv && p_hwfn->hw_info.personality == personality)
 		qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
 
 	/* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 			 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			 bool enable, u8 prio, u8 tc,
+			 bool app_tlv, bool enable, u8 prio, u8 tc,
 			 enum dcbx_protocol_type type)
 {
 	enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
 		personality = qed_dcbx_app_update[i].personality;
 
-		qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+		qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
 				    prio, tc, type, personality);
 	}
 }
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 				enable = true;
 			}
 
-			qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
-						 priority, tc, type);
+			qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
+						 enable, priority, tc, type);
 		}
 	}
 
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 			continue;
 
 		enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-		qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
+		qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
 					 priority, tc, type);
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7ceb2b97538d..88a8576ca9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
 			qed_iscsi_free(p_hwfn);
 			qed_ooo_free(p_hwfn);
 		}
+
+		if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+			qed_rdma_info_free(p_hwfn);
+
 		qed_iov_free(p_hwfn);
 		qed_l2_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
@@ -481,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
+	if (bitmap_weight((unsigned long *)&pq_flags,
+			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+		goto err;
+	}
+
+	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
+		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
 		goto err;
+	}
 
 	switch (pq_flags) {
 	case PQ_FLAGS_RLS:
@@ -506,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
 	}
 
 err:
-	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
-	return NULL;
+	return &qm_info->start_pq;
 }
 
 /* save pq index in qm info */
@@ -531,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
 {
 	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
 
+	if (max_tc == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
+		       PQ_FLAGS_MCOS);
+		return p_hwfn->qm_info.start_pq;
+	}
+
 	if (tc > max_tc)
 		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
 
-	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
 }
 
 u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
 {
 	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
 
+	if (max_vf == 0) {
+		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
+		       PQ_FLAGS_VFS);
+		return p_hwfn->qm_info.start_pq;
+	}
+
 	if (vf > max_vf)
 		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
 
-	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
 }
 
 u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
@@ -1081,6 +1104,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			goto alloc_err;
 		}
 
+		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+			rc = qed_rdma_info_alloc(p_hwfn);
+			if (rc)
+				goto alloc_err;
+		}
+
 		/* DMA info initialization */
 		rc = qed_dmae_info_alloc(p_hwfn);
 		if (rc)
@@ -2102,11 +2131,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 	if (!p_ptt)
 		return -EAGAIN;
 
-	/* If roce info is allocated it means roce is initialized and should
-	 * be enabled in searcher.
-	 */
 	if (p_hwfn->p_rdma_info &&
-	    p_hwfn->b_rdma_enabled_in_prs)
+	    p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
 		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
 
 	/* Re-open incoming traffic */
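
Note: the qed_dev.c hunk above fixes a bits-versus-bytes bug: bitmap_weight() takes its size in bits, so passing sizeof(pq_flags) only scanned the low byte of the flags word. A userspace illustration with a local popcount helper:

    #include <stdio.h>

    /* like bitmap_weight(), nbits is a count of *bits* */
    static int weight(unsigned long v, unsigned int nbits)
    {
        int w = 0;

        while (nbits--) {
            w += v & 1;
            v >>= 1;
        }
        return w;
    }

    int main(void)
    {
        unsigned long flags = 0x300; /* two bits set, both above bit 7 */

        printf("bytes=%d bits=%d\n",
               weight(flags, sizeof(flags)),      /* wrong: sees 0 */
               weight(flags, sizeof(flags) * 8)); /* right: sees 2 */
        return 0;
    }
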
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0f0aba793352..b22f464ea3fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
 	 */
 	do {
 		index = p_sb_attn->sb_index;
+		/* finish reading index before the loop condition */
+		dma_rmb();
 		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
 		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
 	} while (index != p_sb_attn->sb_index);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 35fd0db6a677..fff7f04d4525 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
 			return -EBUSY;
 		}
 		rc = qed_mcp_drain(hwfn, ptt);
+		qed_ptt_release(hwfn, ptt);
 		if (rc)
 			return rc;
-		qed_ptt_release(hwfn, ptt);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 62113438c880..7873d6dfd91f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 }
 
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
-			  struct qed_ptt *p_ptt,
-			  struct qed_rdma_start_in_params *params)
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rdma_info *p_rdma_info;
-	u32 num_cons, num_tasks;
-	int rc = -ENOMEM;
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
-	/* Allocate a struct with current pf rdma info */
 	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
 	if (!p_rdma_info)
-		return rc;
+		return -ENOMEM;
+
+	spin_lock_init(&p_rdma_info->lock);
 
 	p_hwfn->p_rdma_info = p_rdma_info;
+	return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+	kfree(p_hwfn->p_rdma_info);
+	p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	u32 num_cons, num_tasks;
+	int rc = -ENOMEM;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
 		p_rdma_info->proto = PROTOCOLID_IWARP;
 	else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	/* Allocate a struct with device params and fill it */
 	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
 	if (!p_rdma_info->dev)
-		goto free_rdma_info;
+		return rc;
 
 	/* Allocate a struct with port params and fill it */
 	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
 	kfree(p_rdma_info->port);
 free_rdma_dev:
 	kfree(p_rdma_info->dev);
-free_rdma_info:
-	kfree(p_rdma_info);
 
 	return rc;
 }
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 
 	kfree(p_rdma_info->port);
 	kfree(p_rdma_info->dev);
-
-	kfree(p_rdma_info);
 }
 
 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
 
-	spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
 	qed_rdma_init_devinfo(p_hwfn, params);
 	qed_rdma_init_port(p_hwfn);
 	qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
 	/* Disable RoCE search */
 	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
 	p_hwfn->b_rdma_enabled_in_prs = false;
-
+	p_hwfn->p_rdma_info->active = 0;
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 
 	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
 	u8 max_stats_queues;
 	int rc;
 
-	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+	if (!rdma_cxt || !in_params || !out_params ||
+	    !p_hwfn->p_rdma_info->active) {
 		DP_ERR(p_hwfn->cdev,
 		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
 		       rdma_cxt, in_params, out_params);
@@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
 {
 	bool result;
 
-	/* if rdma info has not been allocated, naturally there are no qps */
-	if (!p_hwfn->p_rdma_info)
+	/* if rdma wasn't activated yet, naturally there are no qps */
+	if (!p_hwfn->p_rdma_info->active)
 		return false;
 
 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
 	if (!p_ptt)
 		goto err;
 
-	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+	rc = qed_rdma_alloc(p_hwfn);
 	if (rc)
 		goto err1;
 
@@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
 		goto err2;
 
 	qed_ptt_release(p_hwfn, p_ptt);
+	p_hwfn->p_rdma_info->active = 1;
 
 	return rc;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6f722ee8ee94..3689fe3e5935 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -102,6 +102,7 @@ struct qed_rdma_info {
 	u16 max_queue_zones;
 	enum protocol_type proto;
 	struct qed_iwarp_info iwarp;
+	u8 active:1;
 };
 
 struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
 #else
 static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
 #endif

 int