author		David S. Miller <davem@davemloft.net>	2016-06-30 12:03:36 +0300
committer	David S. Miller <davem@davemloft.net>	2016-06-30 12:03:36 +0300
commit		ee58b57100ca953da7320c285315a95db2f7053d (patch)
tree		77b815a31240adc4d6326346908137fc6c2c3a96 /drivers/net/ethernet
parent		6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e (diff)
parent		e7bdea7750eb2a64aea4a08fa5c0a31719c8155d (diff)
download	linux-ee58b57100ca953da7320c285315a95db2f7053d.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, except the packet scheduler
conflicts which deal with the addition of the free list parameter
to qdisc_enqueue().
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
36 files changed, 399 insertions, 343 deletions
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e0fb0f1122db..20760e10211a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
	 * on the current MAC's MII bus
	 */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-		if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
-			phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+		if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+			phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
			if (!aup->phy_search_highest_addr)
				/* break out with first one found */
				break;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c4240b7df..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@ struct alx_priv {
	unsigned int rx_ringsz;
	unsigned int rxbuf_size;

-	struct page  *rx_page;
-	unsigned int rx_page_offset;
-	unsigned int rx_frag_size;
-
	struct napi_struct napi;
	struct alx_tx_queue txq;
	struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c98acdc0d14f..e708e360a9e3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
	}
 }

-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
-	struct sk_buff *skb;
-	struct page *page;
-
-	if (alx->rx_frag_size > PAGE_SIZE)
-		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
-	page = alx->rx_page;
-	if (!page) {
-		alx->rx_page = page = alloc_page(gfp);
-		if (unlikely(!page))
-			return NULL;
-		alx->rx_page_offset = 0;
-	}
-
-	skb = build_skb(page_address(page) + alx->rx_page_offset,
-			alx->rx_frag_size);
-	if (likely(skb)) {
-		alx->rx_page_offset += alx->rx_frag_size;
-		if (alx->rx_page_offset >= PAGE_SIZE)
-			alx->rx_page = NULL;
-		else
-			get_page(page);
-	}
-	return skb;
-}
-
-
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
	struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

-		skb = alx_alloc_skb(alx, gfp);
+		/*
+		 * When DMA RX address is set to something like
+		 * 0x....fc0, it will be very likely to cause DMA
+		 * RFD overflow issue.
+		 *
+		 * To work around it, we apply rx skb with 64 bytes
+		 * longer space, and offset the address whenever
+		 * 0x....fc0 is detected.
+		 */
+		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;
+
+		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+			skb_reserve(skb, 64);
+
		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

-
	return count;
 }

@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

-	if (alx->rx_page) {
-		put_page(alx->rx_page);
-		alx->rx_page = NULL;
-	}
-
	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
				  alx->dev->name, alx);
		if (!err)
			goto out;
-
		/* fall back to legacy interrupt */
		pci_disable_msi(alx->hw.pdev);
	}
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
-	unsigned int head_size;

	err = alx_identify_hw(alx);
	if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
-
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
-	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	alx->rx_frag_size = roundup_pow_of_two(head_size);
-
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);
-	unsigned int head_size;

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
-	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	alx->rx_frag_size = roundup_pow_of_two(head_size);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
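The alx change above sidesteps a hardware RFD-overflow erratum by over-allocating each receive buffer by 64 bytes and nudging the start address when it lands on an unlucky alignment. A minimal standalone sketch of the same address check (hypothetical helper name, not driver code):

#include <stdint.h>

/* If the low 12 bits of the buffer address are 0xfc0, shift the start
 * by 64 bytes; the extra 64 bytes requested at allocation time make
 * this always safe.  Mirrors the skb_reserve(skb, 64) in the driver. */
static unsigned char *alx_fixup_rx_addr(unsigned char *data)
{
	if (((uintptr_t)data & 0xfff) == 0xfc0)
		data += 64;
	return data;
}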
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6e74ca86e60..b045dc072c40 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -269,15 +269,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
-		u32 ctl1;
+		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

+		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
-		if (ctl1 & BGMAC_DESC_CTL0_SOF)
+		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
@@ -1322,7 +1323,8 @@ static int bgmac_open(struct net_device *net_dev)

	phy_start(net_dev->phydev);

-	netif_carrier_on(net_dev);
+	netif_start_queue(net_dev);
+
	return 0;
 }
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8cadac..16ed20357c5c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
+	int svf;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

-	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+	if (!sq->sqs_mode) {
+		tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+	} else {
+		for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+			if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+				break;
+		}
+		tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+		tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+		tl4 += (svf * NIC_TL4_PER_LMAC);
+		tl4 += (bgx * NIC_TL4_PER_BGX);
+	}
	tl4 += sq_idx;
-	if (sq->sqs_mode)
-		tl4 += vnic * 8;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
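The nic_main.c hunk re-derives the TL4 transmit-limiter index so that secondary Qsets get a non-overlapping range past all the primary blocks. A rough standalone model of the index arithmetic (the constants here are placeholders for illustration; the real values live in the thunder headers):

/* Placeholder values, illustration only. */
#define NIC_TL4_PER_LMAC	64
#define NIC_TL4_PER_BGX		256
#define MAX_LMAC_PER_BGX	4
#define MAX_SQS_PER_VF		11

static int tl4_index(int bgx, int lmac, int svf, int sq_idx, int sqs_mode)
{
	int tl4;

	if (!sqs_mode) {
		/* Primary Qset: one block per LMAC inside the BGX block. */
		tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	} else {
		/* Secondary Qsets start past every primary block and are
		 * further spread by LMAC and by secondary-VF slot (svf). */
		tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
		tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
		tl4 += (svf * NIC_TL4_PER_LMAC);
		tl4 += (bgx * NIC_TL4_PER_BGX);
	}
	return tl4 + sq_idx;
}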
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 3ed21988626b..63a39ac97d53 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
	}

	/* Clear rcvflt bit (latching high) and read it back */
-	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
		return -1;
	}

-	/* Wait for MAC RX to be ready */
-	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
-			 SMU_RX_CTL_STATUS, true)) {
-		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
-		return -1;
-	}
-
	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
		return -1;
	}

-	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
-		dev_err(&bgx->pdev->dev, "Receive fault\n");
-		return -1;
-	}
-
-	/* Receive link is latching low. Force it high and verify it */
-	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
-	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
-			 SPU_STATUS1_RCV_LNK, false)) {
-		dev_err(&bgx->pdev->dev, "SPU receive link down\n");
-		return -1;
-	}
-
+	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-	return 0;
+
+	/* Check for MAC RX faults */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+	cfg &= SMU_RX_CTL_STATUS;
+	if (!cfg)
+		return 0;
+
+	/* Rx local/remote fault seen.
+	 * Do lmac reinit to see if condition recovers
+	 */
+	bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+	return -1;
 }

 static void bgx_poll_for_link(struct work_struct *work)
 {
	struct lmac *lmac;
-	u64 link;
+	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

-	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
-	if (link & SPU_STATUS1_RCV_LNK) {
+	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
	}

	if (lmac->last_link != lmac->link_up) {
+		if (lmac->link_up) {
+			if (bgx_xaui_check_link(lmac)) {
+				/* Errors, clear link_up state */
+				lmac->link_up = 0;
+				lmac->last_speed = SPEED_UNKNOWN;
+				lmac->last_duplex = DUPLEX_UNKNOWN;
+			}
+		}
		lmac->last_link = lmac->link_up;
-		if (lmac->link_up)
-			bgx_xaui_check_link(lmac);
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 {
	struct lmac *lmac;
-	u64 cmrx_cfg;
+	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
		destroy_workqueue(lmac->check_link);
	}

-	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-	cmrx_cfg &= ~(1 << 15);
-	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+	/* Disable packet reception */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_PKT_RX_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Give chance for Rx/Tx FIFO to get drained */
+	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+	/* Disable packet transmission */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_PKT_TX_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Disable serdes lanes */
+	if (!lmac->is_sgmii)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+	else
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+	/* Disable LMAC */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
 #define BGX_CMRX_RX_STAT10		0xC0
 #define BGX_CMRX_RX_BP_DROP		0xC8
 #define BGX_CMRX_RX_DMAC_CTL		0x0E8
+#define BGX_CMRX_RX_FIFO_LEN		0x108
 #define BGX_CMR_RX_DMACX_CAM		0x200
 #define  RX_DMACX_CAM_EN		BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)		(x << 49)
@@ -50,6 +51,7 @@
 #define BGX_CMR_CHAN_MSK_AND		0x450
 #define BGX_CMR_BIST_STATUS		0x460
 #define BGX_CMR_RX_LMACS		0x468
+#define BGX_CMRX_TX_FIFO_LEN		0x518
 #define BGX_CMRX_TX_STAT0		0x600
 #define BGX_CMRX_TX_STAT1		0x608
 #define BGX_CMRX_TX_STAT2		0x610
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 864cb21351a4..ecdb6854a898 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2121,7 +2121,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
 {
	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_error_buff *error_buff;
+	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;
@@ -2133,7 +2133,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
-	list_for_each_entry(error_buff, &adapter->errors, list)
+	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
@@ -3141,14 +3141,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,

 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 {
-	struct ibmvnic_inflight_cmd *inflight_cmd;
+	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_error_buff *error_buff;
+	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
-	list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
@@ -3165,8 +3165,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
-			list_for_each_entry(error_buff, &adapter->errors,
-					    list) {
+			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
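The ibmvnic change swaps list_for_each_entry() for its _safe variant wherever the loop body calls list_del(). The plain iterator dereferences the current node's ->next pointer after the body runs, so deleting (and freeing) the current node leaves it chasing freed memory; the _safe form caches the next node up front. The shape of the pattern, sketched against <linux/list.h>:

	struct ibmvnic_error_buff *error_buff, *tmp;

	/* tmp already holds the next node, so deleting and freeing
	 * error_buff inside the body cannot corrupt the traversal. */
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		kfree(error_buff);
	}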
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 75e60897b7e7..73f745205a1c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -154,6 +154,16 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
	writel(val, hw->hw_addr + reg);
 }

+static bool e1000e_vlan_used(struct e1000_adapter *adapter)
+{
+	u16 vid;
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		return true;
+
+	return false;
+}
+
 /**
  * e1000_regdump - register printout routine
  * @hw: pointer to the HW structure
@@ -2789,7 +2799,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
 }

 /**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
  * @adapter: board private structure to initialize
  **/
 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -3443,7 +3453,8 @@ static void e1000e_set_rx_mode(struct net_device *netdev)

	ew32(RCTL, rctl);

-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
+	    e1000e_vlan_used(adapter))
		e1000e_vlan_strip_enable(adapter);
	else
		e1000e_vlan_strip_disable(adapter);
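The new e1000e_vlan_used() helper leans on for_each_set_bit(): the loop body only runs for bits that are set, so an unconditional return true in the body is a compact "is any bit set?" test. An open-coded standalone equivalent, for comparison (illustrative only; the kernel iterator skips clear bits in word-sized steps instead of probing every index):

#include <stdbool.h>

static bool any_bit_set(const unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (map[i / (8 * sizeof(long))] &
		    (1UL << (i % (8 * sizeof(long)))))
			return true;
	return false;
}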
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4763252bbf85..d1cdc2d76151 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -481,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-	dma_addr_t phy_ring_head, phy_ring_tail;
+	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
-					       &phy_ring_head,
+					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
+	if (unlikely(!eth->scratch_head))
+		return -ENOMEM;
+
	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
@@ -502,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-	phy_ring_tail = phy_ring_head +
+	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 = (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
-			eth->scratch_ring[i].txd2 = (phy_ring_head +
+			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

-	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -671,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,

 err_dma:
	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(&dev->dev, tx_buf);
@@ -701,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
	return nfrags;
 }

+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		if (netif_queue_stopped(eth->netdev[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
	int i;
@@ -766,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

-	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);
-		if (unlikely(atomic_read(&ring->free_count) >
-			     ring->thresh))
-			mtk_wake_queue(eth);
-	}
+
	spin_unlock_irqrestore(&eth->page_lock, flags);

	return NETDEV_TX_OK;
@@ -826,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			skb_free_frag(new_data);
+			netdev->stats.rx_dropped++;
			goto release_desc;
		}

@@ -833,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
+			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -921,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
		}
		mtk_tx_unmap(eth->dev, tx_buf);

-		ring->last_free->txd2 = next_cpu;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);
@@ -946,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
	if (!total)
		return 0;

-	if (atomic_read(&ring->free_count) > ring->thresh)
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
@@ -1027,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
-	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-			   MAX_SKB_FRAGS);
+	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
@@ -1207,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
+	if (eth->scratch_ring) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+				  eth->scratch_ring,
+				  eth->phy_scratch_ring);
+		eth->scratch_ring = NULL;
+		eth->phy_scratch_ring = 0;
+	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth);
	kfree(eth->scratch_head);
@@ -1269,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-		MTK_RX_BT_32DWORDS,
+		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
		MTK_QDMA_GLO_CFG);

	return 0;
@@ -1383,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_disable(eth, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
@@ -1697,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = HZ;
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
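The mediatek changes tighten the classic stop/wake flow-control handshake: the xmit path stops the queue when free descriptors fall to the threshold, and the completion path wakes it only if it is actually stopped and space has been reclaimed, which avoids redundant wakeups. The skeleton of that pattern, as a sketch (not the driver code itself):

	/* xmit side: stop when the ring is nearly full. */
	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	/* completion side: wake only a stopped queue, once there is room. */
	if (netif_queue_stopped(dev) &&
	    atomic_read(&ring->free_count) > ring->thresh)
		netif_wake_queue(dev);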
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eed626d56ea4..a5eb7c62306b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -91,6 +91,7 @@
 #define MTK_QDMA_GLO_CFG	0x1A04
 #define MTK_RX_2B_OFFSET	BIT(31)
 #define MTK_RX_BT_32DWORDS	(3 << 11)
+#define MTK_NDP_CO_PRO		BIT(10)
 #define MTK_TX_WB_DDONE		BIT(6)
 #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
 #define MTK_RX_DMA_BUSY		BIT(3)
@@ -357,6 +358,7 @@ struct mtk_rx_ring {
  * @rx_ring:		Pointer to the memore holding info about the RX ring
  * @rx_napi:		The NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
  * @clk_ethif:		The ethif clock
  * @clk_esw:		The switch clock
@@ -384,6 +386,7 @@ struct mtk_eth {
	struct mtk_rx_ring		rx_ring;
	struct napi_struct		rx_napi;
	struct mtk_tx_dma		*scratch_ring;
+	dma_addr_t			phy_scratch_ring;
	void				*scratch_head;
	struct clk			*clk_ethif;
	struct clk			*clk_esw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3fc7c..f04a423ff79d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
-	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d42083a8a104..6083775dae16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -417,14 +417,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-		if (err)
+		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
+			goto out;
+		}
	}
-	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
-	mutex_unlock(&mdev->state_lock);
+	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+	if (err)
+		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

-	return 0;
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
 }

 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -432,7 +436,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 {
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

@@ -449,7 +453,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
	}
	mutex_unlock(&mdev->state_lock);

-	return 0;
+	return err;
 }

 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -2042,11 +2046,20 @@ err:
	return -ENOMEM;
 }

+static void mlx4_en_shutdown(struct net_device *dev)
+{
+	rtnl_lock();
+	netif_device_detach(dev);
+	mlx4_en_close(dev);
+	rtnl_unlock();
+}
+
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
+	bool shutdown = mdev->dev->persist->interface_state &
+			MLX4_INTERFACE_STATE_SHUTDOWN;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

@@ -2054,7 +2067,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
	if (priv->registered) {
		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
							      priv->port));
-		unregister_netdev(dev);
+		if (shutdown)
+			mlx4_en_shutdown(dev);
+		else
+			unregister_netdev(dev);
	}

	if (priv->allocated)
@@ -2079,7 +2095,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

-	free_netdev(dev);
+	if (!shutdown)
+		free_netdev(dev);
 }

 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2464,9 +2481,14 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
	 * strip that feature if this is an IPv6 encapsulated frame.
	 */
	if (skb->encapsulation &&
-	    (skb->ip_summed == CHECKSUM_PARTIAL) &&
-	    (ip_hdr(skb)->version != 4))
-		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		struct mlx4_en_priv *priv = netdev_priv(dev);
+
+		if (!priv->vxlan_port ||
+		    (ip_hdr(skb)->version != 4) ||
+		    (udp_hdr(skb)->dest != priv->vxlan_port))
+			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+	}

	return features;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3564aad778a3..b673a5fc6b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3223,6 +3223,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);
+	spin_lock_init(&priv->cmd.context_lock);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);
@@ -4135,8 +4136,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
-	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+	if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+		/* Notify mlx4 clients that the kernel is being shut down */
+		persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
		mlx4_unload_one(pdev);
+	}
	mutex_unlock(&persist->interface_state_mutex);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index dcd2df6518de..0b4986268cc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -545,6 +545,7 @@ const char *mlx5_command_str(int command)
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	default: return "unknown command opcode";
	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index da885c0dfebe..b97511bf4c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -453,7 +453,7 @@ enum mlx5e_traffic_types {
 };

 enum {
-	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 39a4d961a58e..b29684d9fcd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -146,7 +146,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 #define MLX5E_NUM_SQ_STATS(priv) \
	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
	 test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+	(hweight8(mlx5e_query_pfc_combined(priv)) * \
+	 NUM_PPORT_PER_PRIO_PFC_COUNTERS)

 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
@@ -175,42 +177,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)

	/* SW counters */
	for (i = 0; i < NUM_SW_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);

	/* Q counters */
	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);

	/* VPORT counters */
	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       vport_stats_desc[i].name);
+		       vport_stats_desc[i].format);

	/* PPORT counters */
	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_802_3_stats_desc[i].name);
+		       pport_802_3_stats_desc[i].format);

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2863_stats_desc[i].name);
+		       pport_2863_stats_desc[i].format);

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2819_stats_desc[i].name);
+		       pport_2819_stats_desc[i].format);

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-				prio,
-				pport_per_prio_traffic_stats_desc[i].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-				prio, pport_per_prio_pfc_stats_desc[i].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_pfc_stats_desc[i].format, prio);
		}
	}

@@ -220,16 +221,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
	/* per channel counters */
	for (i = 0; i < priv->params.num_channels; i++)
		for (j = 0; j < NUM_RQ_STATS; j++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-				rq_stats_desc[j].name);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				rq_stats_desc[j].format, i);

	for (tc = 0; tc < priv->params.num_tc; tc++)
		for (i = 0; i < priv->params.num_channels; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
-					"tx%d_%s",
-					priv->channeltc_to_txq_map[i][tc],
-					sq_stats_desc[j].name);
+					sq_stats_desc[j].format,
+					priv->channeltc_to_txq_map[i][tc]);
 }

 static void mlx5e_get_strings(struct net_device *dev,
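The mlx5 ethtool rework stores a printf template per counter instead of a bare name, so per-queue and per-priority indices are stamped in with a single sprintf() rather than hand-built prefixes. A minimal standalone sketch of the idea (simplified types, hypothetical entries):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

struct counter_desc {
	char format[ETH_GSTRING_LEN];	/* e.g. "rx%d_packets" */
	int offset;
};

static const struct counter_desc rq_desc[] = {
	{ "rx%d_packets", 0 },
	{ "rx%d_bytes", 8 },
};

static void fill_strings(char *data, int channel)
{
	int j, idx = 0;

	for (j = 0; j < 2; j++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			rq_desc[j].format, channel);	/* -> "rx0_packets"... */
}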
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 02a0f1796f7b..a64ce5df5810 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -107,11 +107,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
-		s->lro_packets	+= rq_stats->lro_packets;
-		s->lro_bytes	+= rq_stats->lro_bytes;
+		s->rx_lro_packets += rq_stats->lro_packets;
+		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
-		s->rx_csum_sw	+= rq_stats->csum_sw;
-		s->rx_csum_inner += rq_stats->csum_inner;
+		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
@@ -124,24 +124,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
-			s->tso_packets		+= sq_stats->tso_packets;
-			s->tso_bytes		+= sq_stats->tso_bytes;
-			s->tso_inner_packets	+= sq_stats->tso_inner_packets;
-			s->tso_inner_bytes	+= sq_stats->tso_inner_bytes;
+			s->tx_tso_packets	+= sq_stats->tso_packets;
+			s->tx_tso_bytes		+= sq_stats->tso_bytes;
+			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
-			s->tx_csum_inner	+= sq_stats->csum_offload_inner;
-			tx_offload_none		+= sq_stats->csum_offload_none;
+			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+			tx_offload_none		+= sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
-	s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-			       s->rx_csum_sw;
+	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

-	s->link_down_events = MLX5_GET(ppcnt_reg,
+	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
 }
@@ -246,7 +245,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
	struct mlx5e_priv *priv = vpriv;

-	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
@@ -262,12 +261,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,

 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }

 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }

@@ -590,7 +589,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

-	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+	err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
	if (err)
		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bd947704b59c..022acc2e8922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
	if (is_first_ethertype_ip(skb)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-		rq->stats.csum_sw++;
+		rq->stats.csum_complete++;
		return;
	}

@@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
		if (cqe_is_tunneled(cqe)) {
			skb->csum_level = 1;
			skb->encapsulation = 1;
-			rq->stats.csum_inner++;
+			rq->stats.csum_unnecessary_inner++;
		}
		return;
	}
"tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) }, { "rx_vport_unicast_packets", VPORT_COUNTER_OFF(received_eth_unicast.packets) }, { "rx_vport_unicast_bytes", @@ -192,94 +188,68 @@ struct mlx5e_pport_stats { }; static const struct counter_desc pport_802_3_stats_desc[] = { - { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) }, - { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) }, - { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, - { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) }, - { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) }, - { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) }, - { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, - { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, - { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, - { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, - { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) }, - { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) }, - { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) }, - { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, - { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, - { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) }, - { "unsupported_op_rx", - PPORT_802_3_OFF(a_unsupported_opcodes_received) }, - { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, - { "pause_ctrl_tx", - PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, + { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) }, + { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) }, + { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, + { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) }, + { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) }, + { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, + { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, + { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, + { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, + { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) }, + { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) }, + { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) }, + { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, + { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, + { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) }, + { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) }, + { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, + { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, }; static const struct counter_desc pport_2863_stats_desc[] = { - { "in_octets", PPORT_2863_OFF(if_in_octets) }, - { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) }, - { "in_discards", PPORT_2863_OFF(if_in_discards) }, - { "in_errors", PPORT_2863_OFF(if_in_errors) }, - { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) }, - { "out_octets", PPORT_2863_OFF(if_out_octets) }, - { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) }, - { "out_discards", PPORT_2863_OFF(if_out_discards) }, - { "out_errors", PPORT_2863_OFF(if_out_errors) }, - { "in_multicast_pkts", 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b000ddc29553..5a750b9cd006 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
-			sq->stats.csum_offload_inner++;
+			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
-		sq->stats.csum_offload_none++;
+		sq->stats.csum_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
"rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, - { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) }, - { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, - { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, + { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; struct mlx5e_rq_stats { u64 packets; u64 bytes; - u64 csum_sw; - u64 csum_inner; + u64 csum_complete; + u64 csum_unnecessary_inner; u64 csum_none; u64 lro_packets; u64 lro_bytes; @@ -292,19 +262,19 @@ struct mlx5e_rq_stats { }; static const struct counter_desc rq_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, - { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, }; struct mlx5e_sq_stats { @@ -315,28 +285,28 @@ struct mlx5e_sq_stats { u64 tso_bytes; u64 tso_inner_packets; u64 tso_inner_bytes; - u64 csum_offload_inner; + u64 csum_partial_inner; u64 nop; /* less likely accessed in data path */ - u64 csum_offload_none; + u64 csum_none; u64 stopped; u64 wake; u64 dropped; }; static const struct counter_desc sq_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2357..821a087c7ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
				  &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
				  &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

-	err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d23948b88962..a453fffaa1a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -409,7 +409,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
-	len = skb->len;
+	/* TX header is consumed by HW on the way so we shouldn't count its
+	 * bytes as being sent.
+	 */
+	len = skb->len - MLXSW_TXHDR_LEN;
+
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3842eab9449a..25f658b3849a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
		}
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
-	len = skb->len;
+	/* TX header is consumed by HW on the way so we shouldn't count its
+	 * bytes as being sent.
+	 */
+	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 2195ed3053da..c25a8ba6cf9f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2015,7 +2015,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)

 	netif_tx_wake_all_queues(nn->netdev);

-	enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	nfp_net_read_link_status(nn);
 }

@@ -2044,7 +2044,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 			       NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
 	if (err)
 		goto err_free_exn;
-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

 	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
 			       GFP_KERNEL);
@@ -2133,7 +2133,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
 {
 	unsigned int r;

-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	netif_carrier_off(nn->netdev);
 	nn->link_up = false;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index d121a8bf6b20..a12c6caa6c66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
 	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
 	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+	p_ramrod->untagged = p_params->only_untagged;

 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
 			  !!(accept_filter & QED_ACCEPT_NONE));

-		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
-			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
-			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
 			  !!(accept_filter & QED_ACCEPT_NONE));

@@ -1756,7 +1753,8 @@ static int qed_start_vport(struct qed_dev *cdev,
 			   start.vport_id, start.mtu);
 	}

-	qed_reset_vport_stats(cdev);
+	if (params->clear_stats)
+		qed_reset_vport_stats(cdev);

 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index e32ee57cdfee..1f13abb5c316 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1088,6 +1088,7 @@ static int qed_get_port_type(u32 media_type)
 	case MEDIA_SFPP_10G_FIBER:
 	case MEDIA_SFP_1G_FIBER:
 	case MEDIA_XFP_FIBER:
+	case MEDIA_MODULE_FIBER:
 	case MEDIA_KR:
 		port_type = PORT_FIBRE;
 		break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index ad9bf5c85c3f..97ffeae262bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
 		  DQ_XCM_CORE_SPQ_PROD_CMD);
 	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-	/* validate producer is up to-date */
-	rmb();
-
 	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

-	/* do not reorder */
-	barrier();
+	/* make sure the SPQE is updated before the doorbell */
+	wmb();

 	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

 	/* make sure doorbell is rung */
-	mmiowb();
+	wmb();

 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -620,7 +616,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,

 			*p_en2 = *p_ent;

-			kfree(p_ent);
+			/* EBLOCK is responsible for freeing the allocated p_ent */
+			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+				kfree(p_ent);

 			p_ent = p_en2;
 		}
@@ -755,6 +753,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 * Thus, after gaining the answer perform the cleanup here.
 		 */
 		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+		if (p_ent->queue == &p_spq->unlimited_pending) {
+			/* This is an allocated p_ent which does not need to
+			 * be returned to the pool.
+			 */
+			kfree(p_ent);
+			return rc;
+		}
+
 		if (rc)
 			goto spq_post_fail2;

@@ -850,8 +857,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 			found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 						fw_return_code);

-	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-		/* EBLOCK is responsible for freeing its own entry */
+	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+	    (found->queue == &p_spq->unlimited_pending))
+		/* EBLOCK is responsible for returning its own entry into the
+		 * free list, unless it originally added the entry into the
+		 * unlimited pending list.
+		 */
 		qed_spq_return_entry(p_hwfn, found);

 	/* Attempt to post pending requests */
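The qed_spq.c hunk at @@ -213 replaces a read barrier and a compiler barrier with write barriers: the SPQ element and producer value must be globally visible before the doorbell register is written, and the doorbell write itself must be flushed before the function returns. A userspace analogue using C11 fences, purely illustrative; a real driver rings an MMIO register, here an atomic stands in for it:

#include <stdatomic.h>
#include <stdint.h>

struct queue {
        uint32_t desc[16];              /* work descriptors (the "SPQE") */
        _Atomic uint32_t doorbell;      /* stand-in for the MMIO doorbell */
};

static void post(struct queue *q, unsigned int idx, uint32_t val)
{
        q->desc[idx % 16] = val;        /* publish the work item */

        /* Order the descriptor write before the doorbell write, as the
         * wmb() before DOORBELL() does in qed_spq_hw_post().
         */
        atomic_thread_fence(memory_order_release);

        atomic_store_explicit(&q->doorbell, idx, memory_order_relaxed);
}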
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2972742c6adb..19bc631e1f04 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -3222,7 +3222,7 @@ static int qede_stop_queues(struct qede_dev *edev)
 	return rc;
 }

-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
 	int rc, tc, i;
 	int vlan_removal_en = 1;
@@ -3453,6 +3453,7 @@ out:

 enum qede_load_mode {
 	QEDE_LOAD_NORMAL,
+	QEDE_LOAD_RELOAD,
 };

 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3491,7 +3492,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
 		goto err3;
 	DP_INFO(edev, "Setup IRQs succeeded\n");

-	rc = qede_start_queues(edev);
+	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
 	if (rc)
 		goto err4;
 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3546,7 +3547,7 @@ void qede_reload(struct qede_dev *edev,
 	if (func)
 		func(edev, args);

-	qede_load(edev, QEDE_LOAD_NORMAL);
+	qede_load(edev, QEDE_LOAD_RELOAD);

 	mutex_lock(&edev->qede_lock);
 	qede_config_rx_mode(edev->ndev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4625..607bb7d4514d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -2220,7 +2220,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
 	if (!opcode)
 		return;

-	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
 	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
 	desc = &sds_ring->desc_head[consumer];
 	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35be9e..4c83739d158f 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
 			     const struct efx_farch_register_test *regs,
 			     size_t n_regs)
 {
-	unsigned address = 0, i, j;
+	unsigned address = 0;
+	int i, j;
 	efx_oword_t mask, imask, original, reg, buf;

 	for (i = 0; i < n_regs; ++i) {
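The farch.c hunk above moves the loop indices from unsigned to int. As a general illustration of why counter signedness matters (not necessarily the exact failure this driver hit): any countdown loop with a >= 0 condition can never terminate when the index is unsigned, because the decrement wraps instead of going negative. A small self-contained sketch:

#include <stdio.h>

int main(void)
{
        int arr[4] = { 1, 2, 3, 4 };

        /* Correct: a signed index goes negative and the loop stops. */
        for (int i = 3; i >= 0; i--)
                printf("%d ", arr[i]);
        printf("\n");

        /* Broken variant, kept as a comment on purpose: with "unsigned i",
         * the condition i >= 0 is always true, so i-- wraps from 0 to
         * UINT_MAX and the loop runs forever, reading far out of bounds:
         *
         *   for (unsigned i = 3; i >= 0; i--)
         *           printf("%d ", arr[i]);
         */
        return 0;
}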
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f627..b5ab5e120bca 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -116,7 +116,6 @@ struct smsc911x_data {

 	struct phy_device *phy_dev;
 	struct mii_bus *mii_bus;
-	int phy_irq[PHY_MAX_ADDR];
 	unsigned int using_extphy;
 	int last_duplex;
 	int last_carrier;
@@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
 	pdata->mii_bus->priv = pdata;
 	pdata->mii_bus->read = smsc911x_mii_read;
 	pdata->mii_bus->write = smsc911x_mii_write;
-	memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));

 	pdata->mii_bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6d0c5a0c34c6..1a93a1f28433 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2556,8 +2556,6 @@ static int cpsw_probe(struct platform_device *pdev)
 clean_ale_ret:
 	cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 clean_runtime_disable_ret:
 	pm_runtime_disable(&pdev->dev);
@@ -2585,8 +2583,6 @@ static int cpsw_remove(struct platform_device *pdev)

 	unregister_netdev(ndev);
 	cpsw_ale_destroy(priv->ale);
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 	pm_runtime_disable(&pdev->dev);
 	device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc075b3..11213a38c795 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
 	if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
 		struct mpipe_data *md = &mpipe_data[instance];
 		struct skb_shared_hwtstamps shhwtstamps;
-		struct timespec ts;
+		struct timespec64 ts;

 		shtx->tx_flags |= SKBTX_IN_PROGRESS;
 		gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 {
-	struct timespec ts;
+	struct timespec64 ts;

-	getnstimeofday(&ts);
+	ktime_get_ts64(&ts);
 	gxio_mpipe_set_timestamp(&md->context, &ts);
 	mutex_init(&md->ptp_lock);
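The tilegx hunk switches from struct timespec and getnstimeofday() to timespec64 and ktime_get_ts64(): the former carries a 32-bit seconds field on 32-bit kernels and overflows in 2038, while the 64-bit variants stay correct. A userspace sketch of 2038-safe time retrieval, assuming a platform where time_t is 64-bit (as on modern Linux); the clock domain is illustrative only and is not claimed to match the kernel call:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Fetch a timestamp whose seconds field is 64-bit-safe; printed
         * through "long long" so the format is correct regardless of the
         * platform's time_t width.
         */
        if (clock_gettime(CLOCK_REALTIME, &ts) != 0)
                return 1;

        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}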