Diffstat (limited to 'drivers/net/ethernet')
118 files changed, 2028 insertions, 951 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 79e1a0282163..17b2126075e0 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id) int i; pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].frag[0].addr), - le32_to_cpu(vp->tx_ring[entry].frag[0].length), + le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, PCI_DMA_TODEVICE); for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 2777289a26c0..2f79d29f17f2 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), + PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009), PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 3f3bcbea15bd..0907ab6ff309 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) sizeof(u32), &tx_ring->tx_status_pa, GFP_KERNEL); - if (!tx_ring->tx_status_pa) { + if (!tx_ring->tx_status) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx status block\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 17472851674f..f749e4d389eb 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev) priv->mdio->id); mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); priv->mdio = NULL; } diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 87e727b921dc..fcdf5dda448f 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -50,8 +50,8 @@ static const char version[] = static void write_rreg(u_long base, u_int reg, u_int val) { asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #-4] @ NET_RDP" + "strh %1, [%2] @ NET_RAP\n\t" + "strh %0, [%2, #-4] @ NET_RDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } @@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg) { unsigned short v; asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "ldr%?h %0, [%2, #-4] @ NET_RDP" + "strh %1, [%2] @ NET_RAP\n\t" + "ldrh %0, [%2, #-4] @ NET_RDP" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; @@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg) static inline void write_ireg(u_long base, u_int reg, u_int val) { asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #8] @ NET_IDP" + "strh %1, [%2] @ NET_RAP\n\t" + "strh %0, [%2, #8] @ NET_IDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } @@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg) { u_short v; asm volatile( 
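[Editor's note] The 3c59x hunk above masks the descriptor's fragment length with 0xFFF before pci_unmap_single(), because the upper bits of that field carry control flags (such as the last-fragment marker) rather than a byte count. A minimal sketch of the pattern follows; the struct, the mask value and the my_* names are assumptions for illustration, not the driver's definitions.

#include <linux/pci.h>

/* Upper bits of the length word are flags, not size; mask is assumed. */
#define MY_FRAG_LEN_MASK 0xFFF

struct my_frag {
	__le32 addr;
	__le32 length;	/* [11:0] byte count, [31:12] control flags */
};

static void my_unmap_frag(struct pci_dev *pdev, struct my_frag *frag)
{
	/* Unmap exactly the bytes that were mapped: strip the flag bits. */
	pci_unmap_single(pdev,
			 le32_to_cpu(frag->addr),
			 le32_to_cpu(frag->length) & MY_FRAG_LEN_MASK,
			 PCI_DMA_TODEVICE);
}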
- "str%?h %1, [%2] @ NAT_RAP\n\t" - "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" + "strh %1, [%2] @ NAT_RAP\n\t" + "ldrh %0, [%2, #8] @ NET_IDP\n\t" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; @@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne offset = ISAMEM_BASE + (offset << 1); length = (length + 1) & ~1; if ((int)buf & 2) { - asm volatile("str%?h %2, [%0], #4" + asm volatile("strh %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; @@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"); asm volatile( - "ldm%?ia %0!, {%1, %2}" + "ldmia %0!, {%1, %2}" : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); length -= 8; asm volatile( - "str%?h %1, [%0], #4\n\t" - "mov%? %1, %1, lsr #16\n\t" - "str%?h %1, [%0], #4\n\t" - "str%?h %2, [%0], #4\n\t" - "mov%? %2, %2, lsr #16\n\t" - "str%?h %2, [%0], #4" + "strh %1, [%0], #4\n\t" + "mov %1, %1, lsr #16\n\t" + "strh %1, [%0], #4\n\t" + "strh %2, [%0], #4\n\t" + "mov %2, %2, lsr #16\n\t" + "strh %2, [%0], #4" : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); } while (length > 0) { - asm volatile("str%?h %2, [%0], #4" + asm volatile("strh %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; @@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned if ((int)buf & 2) { unsigned int tmp; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? %2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" + "ldrh %2, [%0], #4\n\t" + "strb %2, [%1], #1\n\t" + "mov %2, %2, lsr #8\n\t" + "strb %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); length -= 2; } while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "ldr%?h %4, [%0], #4\n\t" - "ldr%?h %3, [%0], #4\n\t" - "orr%? %2, %2, %4, lsl #16\n\t" - "ldr%?h %4, [%0], #4\n\t" - "orr%? %3, %3, %4, lsl #16\n\t" - "stm%?ia %1!, {%2, %3}" + "ldrh %2, [%0], #4\n\t" + "ldrh %4, [%0], #4\n\t" + "ldrh %3, [%0], #4\n\t" + "orr %2, %2, %4, lsl #16\n\t" + "ldrh %4, [%0], #4\n\t" + "orr %3, %3, %4, lsl #16\n\t" + "stmia %1!, {%2, %3}" : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) : "0" (offset), "1" (buf)); length -= 8; @@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned while (length > 0) { unsigned int tmp; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? %2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" + "ldrh %2, [%0], #4\n\t" + "strb %2, [%1], #1\n\t" + "mov %2, %2, lsr #8\n\t" + "strb %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); length -= 2; } diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 256f590f6bb1..3a7ebfdda57d 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int /* Make certain the data structures used by the LANCE are aligned and DMAble. 
*/ lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); - if(lp==NULL) - return -ENODEV; + if (!lp) + return -ENOMEM; if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); dev->ml_priv = lp; lp->name = chipname; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index a4799c1fc7d4..5eb9b20c0eea 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) int ret; ring = pdata->rx_ring; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); if (ret) @@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) if (pdata->cq_cnt) { ring = pdata->tx_ring->cp_ring; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); if (ret) { @@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev) static void xgene_enet_free_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata; + struct xgene_enet_desc_ring *ring; struct device *dev; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); - devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); + ring = pdata->rx_ring; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); if (pdata->cq_cnt) { - devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, - pdata->tx_ring->cp_ring); + ring = pdata->tx_ring->cp_ring; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); } } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 70d5b62c125a..248dfc40a761 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -25,6 +25,7 @@ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/efi.h> +#include <linux/irq.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/of_net.h> diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abe1eabc0171..6446af1403f7 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) struct sk_buff *skb = tx_buff->skb; unsigned int info = le32_to_cpu(txbd->info); - if ((info & FOR_EMAC) || !txbd->data) + if ((info & FOR_EMAC) || !txbd->data || !skb) break; if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { @@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->data = 0; txbd->info = 0; + tx_buff->skb = NULL; *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; } @@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev) *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; } + priv->txbd_curr = 0; + priv->txbd_dirty = 0; + /* Clean Tx BD's */ memset(priv->txbd, 0, TX_RING_SZ); @@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) } /** + * arc_free_tx_queue - free skb from tx queue + * @ndev: Pointer to the network device. 
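[Editor's note] The xgene hunks above pair irq_set_status_flags(..., IRQ_DISABLE_UNLAZY) with request and irq_clear_status_flags() with free, so that disable_irq() masks the line immediately instead of lazily. A condensed sketch of that pairing, with my_* names standing in for the driver's own:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_register_irq(struct device *dev, unsigned int irq, void *ctx)
{
	/* Mask at disable_irq() time rather than lazily, so no stray
	 * interrupt arrives after disable_irq() returns.
	 */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return devm_request_irq(dev, irq, my_isr, IRQF_SHARED, "my-irq", ctx);
}

static void my_free_irq(struct device *dev, unsigned int irq, void *ctx)
{
	/* Undo the flag before the descriptor is released. */
	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	devm_free_irq(dev, irq, ctx);
}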
+ * + * This function must be called while EMAC disable + */ +static void arc_free_tx_queue(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < TX_BD_NUM; i++) { + struct arc_emac_bd *txbd = &priv->txbd[i]; + struct buffer_state *tx_buff = &priv->tx_buff[i]; + + if (tx_buff->skb) { + dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(tx_buff->skb); + } + + txbd->info = 0; + txbd->data = 0; + tx_buff->skb = NULL; + } +} + +/** + * arc_free_rx_queue - free skb from rx queue + * @ndev: Pointer to the network device. + * + * This function must be called while EMAC disable + */ +static void arc_free_rx_queue(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < RX_BD_NUM; i++) { + struct arc_emac_bd *rxbd = &priv->rxbd[i]; + struct buffer_state *rx_buff = &priv->rx_buff[i]; + + if (rx_buff->skb) { + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(rx_buff->skb); + } + + rxbd->info = 0; + rxbd->data = 0; + rx_buff->skb = NULL; + } +} + +/** * arc_emac_stop - Close the network device. * @ndev: Pointer to the network device. * @@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev) /* Disable EMAC */ arc_reg_clr(priv, R_CTRL, EN_MASK); + /* Return the sk_buff to system */ + arc_free_tx_queue(ndev); + arc_free_rx_queue(ndev); + return 0; } @@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); - priv->tx_buff[*txbd_curr].skb = skb; priv->txbd[*txbd_curr].data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ @@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); + /* Make sure info word is set */ + wmb(); + + priv->tx_buff[*txbd_curr].skb = skb; + /* Increment index to point to the next BD */ *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index ecc4a334c507..08a23e6b60e9 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget) nb8800_tx_done(dev); again: - while (work < budget) { + do { struct nb8800_rx_buf *rxb; unsigned int len; @@ -330,7 +330,7 @@ again: rxd->report = 0; last = next; work++; - } + } while (work < budget); if (work) { priv->rx_descs[last].desc.config |= DESC_EOC; @@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev) goto err_disable_clk; } - priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (of_phy_is_fixed_link(pdev->dev.of_node)) { + ret = of_phy_register_fixed_link(pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "bad fixed-link spec\n"); + goto err_free_bus; + } + priv->phy_node = of_node_get(pdev->dev.of_node); + } + + if (!priv->phy_node) + priv->phy_node = of_parse_phandle(pdev->dev.of_node, + "phy-handle", 0); + if (!priv->phy_node) { dev_err(&pdev->dev, "no PHY specified\n"); ret = -ENODEV; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 
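[Editor's note] The arc_emac changes above close a race between the xmit and tx-clean paths: the skb pointer is published only after the descriptor's info word, separated by wmb(), and the cleaner treats a NULL skb as "slot not fully published". A minimal sketch of the ordering, assuming hypothetical my_bd/my_buf structures and an assumed ownership-bit value:

#include <linux/skbuff.h>

#define MY_FOR_EMAC	BIT(31)		/* HW-ownership bit; value assumed */

struct my_bd { __le32 info; __le32 data; };
struct my_buf { struct sk_buff *skb; };

/* Producer: make the descriptor fully visible before publishing the skb
 * pointer that the cleaner uses as its "slot is live" signal.
 */
static void my_tx_publish(struct my_bd *txbd, struct my_buf *tx_buff,
			  struct sk_buff *skb, dma_addr_t addr,
			  unsigned int len)
{
	txbd->data = cpu_to_le32(addr);
	wmb();				/* data before info */
	txbd->info = cpu_to_le32(MY_FOR_EMAC | len);
	wmb();				/* info before skb */
	tx_buff->skb = skb;
}

/* Consumer: a NULL skb means the producer has not finished publishing. */
static bool my_tx_slot_done(struct my_bd *txbd, struct my_buf *tx_buff)
{
	unsigned int info = le32_to_cpu(txbd->info);

	return !(info & MY_FOR_EMAC) && txbd->data && tx_buff->skb;
}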
8550df189ceb..19f7cd02e085 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -151,8 +151,11 @@ config BNX2X_VXLAN config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) + depends on BCMA && BCMA_HOST_SOC + depends on HAS_DMA + depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST select PHYLIB + select FIXED_PHY ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. They can be found on BCM47xx SoCs and provide gigabit ethernet. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 27aa0802d87d..91874d24fd56 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -4896,9 +4896,9 @@ struct c2s_pri_trans_table_entry { * cfc delete event data */ struct cfc_del_event_data { - u32 cid; - u32 reserved0; - u32 reserved1; + __le32 cid; + __le32 reserved0; + __le32 reserved1; }; @@ -5114,15 +5114,9 @@ struct vf_pf_channel_zone_trigger { * zone that triggers the in-bound interrupt */ struct trigger_vf_zone { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - struct vf_pf_channel_zone_trigger vf_pf_channel; -#elif defined(__LITTLE_ENDIAN) struct vf_pf_channel_zone_trigger vf_pf_channel; u8 reserved0; u16 reserved1; -#endif u32 reserved2; }; @@ -5207,9 +5201,9 @@ struct e2_integ_data { * set mac event data */ struct eth_event_data { - u32 echo; - u32 reserved0; - u32 reserved1; + __le32 echo; + __le32 reserved0; + __le32 reserved1; }; @@ -5219,9 +5213,9 @@ struct eth_event_data { struct vf_pf_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 msg_addr_lo; - u32 msg_addr_hi; + __le16 reserved1; + __le32 msg_addr_lo; + __le32 msg_addr_hi; }; /* @@ -5230,9 +5224,9 @@ struct vf_pf_event_data { struct vf_flr_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* @@ -5241,9 +5235,9 @@ struct vf_flr_event_data { struct malicious_vf_event_data { u8 vf_id; u8 err_id; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index d946bba43726..1fb80100e5e7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) shift -= 4; digit = ((num & mask) >> shift); if (digit == 0 && remove_leading_zeros) { - mask = mask >> 4; - continue; - } else if (digit < 0xa) - *str_ptr = digit + '0'; - else - *str_ptr = digit - 0xa + 'a'; - remove_leading_zeros = 0; - str_ptr++; - (*len)--; + *str_ptr = '0'; + } else { + if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + } mask = mask >> 4; if (shift == 4*4) { + if (remove_leading_zeros) { + str_ptr++; + (*len)--; + } *str_ptr = '.'; str_ptr++; (*len)--; remove_leading_zeros = 1; } } + if (remove_leading_zeros) + (*len)--; return 0; } +static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len) +{ + u8 *str_ptr = str; + u32 mask = 0x00f00000; + u8 shift = 8*3; + u8 digit; + u8 remove_leading_zeros = 1; + + if (*len < 10) { + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return 
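[Editor's note] The bnx2x_hsi.h hunk above retypes firmware event fields from u32/u16 to __le32/__le16, so sparse can flag any access that skips byte-order conversion, and callers convert once with le32_to_cpu(). A small sketch of the idiom; the struct and mask parameter are assumptions:

#include <linux/types.h>

/* FW-produced event data is little-endian on the wire; declare it as
 * such so sparse catches unconverted accesses.
 */
struct my_eth_event_data {
	__le32 echo;
	__le32 reserved0;
	__le32 reserved1;
};

static u32 my_event_cid(const struct my_eth_event_data *ev, u32 swcid_mask)
{
	u32 echo = le32_to_cpu(ev->echo);	/* convert once, reuse */

	return echo & swcid_mask;
}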
-EINVAL; + } + + while (shift > 0) { + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + *str_ptr = '0'; + } else { + if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + } + mask = mask >> 4; + if ((shift == 4*4) || (shift == 4*2)) { + if (remove_leading_zeros) { + str_ptr++; + (*len)--; + } + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + if (remove_leading_zeros) + (*len)--; + return 0; +} static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) { @@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, if (bnx2x_is_8483x_8485x(phy)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); - bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, - phy->ver_addr); + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + fw_ver1 &= 0xfff; + bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr); } else { /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ @@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, offset, i; + u16 val, led3_blink_rate, offset, i; static struct bnx2x_reg_set reg_set[] = { {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, - {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} }; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* Set LED5 source */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x90); + led3_blink_rate = 0x000f; + } else { + led3_blink_rate = 0x0000; + } + /* Set LED3 BLINK */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_BLINK, + led3_blink_rate); + /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, val &= 0xFE00; val |= 0x0092; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + val |= 2 << 12; /* LED5 ON based on source */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); @@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; - /* stretch_en for LED3*/ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT | + MDIO_PMA_REG_84823_LED3_STRETCH_EN; + else + val = MDIO_PMA_REG_84823_LED3_STRETCH_EN; + + /* stretch_en for LEDs */ bnx2x_cl45_read_or_write(bp, phy, - MDIO_PMA_DEVAD, offset, - MDIO_PMA_REG_84823_LED3_STRETCH_EN); + MDIO_PMA_DEVAD, + offset, + val); } static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, @@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; switch (action) { case PHY_INIT: - if (!bnx2x_is_8483x_8485x(phy)) { + if (bnx2x_is_8483x_8485x(phy)) { /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } @@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy, static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, 
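[Editor's note] bnx2x_3_seq_format_ver() above walks a 24-bit value nibble by nibble, inserting '.' separators at the two byte boundaries and stripping leading zeros per group while keeping a lone '0' for an all-zero group. The same output can be produced more directly with scnprintf(); this is a hedged equivalent under that reading of the loop, not the driver's code:

#include <linux/kernel.h>

/* Format a 24-bit firmware version as "x.y.z" in lowercase hex. */
static int my_format_ver_3seq(u32 num, char *buf, size_t len)
{
	if (len < 10)		/* worst case "ff.ff.ff" + NUL, as above */
		return -EINVAL;

	return scnprintf(buf, len, "%x.%x.%x",
			 (num >> 16) & 0xff, (num >> 8) & 0xff, num & 0xff);
}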
- u16 cmd_args[], int argc) + u16 cmd_args[], int argc, int process) { int idx; u16 val; struct bnx2x *bp = params->bp; - /* Write CMD_OPEN_OVERRIDE to STATUS reg */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_OPEN_OVERRIDE); + int rc = 0; + + if (process == PHY84833_MB_PROCESS2) { + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + } + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_848xx_CMD_HDLR_STATUS, &val); @@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, } if (idx >= PHY848xx_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR + * clear the status to CMD_CLEAR_COMPLETE + */ + if (val == PHY84833_STATUS_CMD_COMPLETE_PASS || + val == PHY84833_STATUS_CMD_COMPLETE_ERROR) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } return -EINVAL; } - - /* Prepare argument(s) and issue command */ - for (idx = 0; idx < argc; idx++) { - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_DATA1 + idx, - cmd_args[idx]); + if (process == PHY84833_MB_PROCESS1 || + process == PHY84833_MB_PROCESS2) { + /* Prepare argument(s) */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { @@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, if ((idx >= PHY848xx_CMDHDLR_WAIT) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { DP(NETIF_MSG_LINK, "FW cmd failed.\n"); - return -EINVAL; + rc = -EINVAL; } - /* Gather returning data */ - for (idx = 0; idx < argc; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_DATA1 + idx, - &cmd_args[idx]); + if (process == PHY84833_MB_PROCESS3 && rc == 0) { + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } } - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_CLEAR_COMPLETE); - return 0; + if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR || + val == PHY84833_STATUS_CMD_COMPLETE_PASS) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } + return rc; } static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, - u16 cmd_args[], int argc) + u16 cmd_args[], int argc, + int process) { struct bnx2x *bp = params->bp; @@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, argc); } else { return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, - argc); + argc, process); } } @@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy, status = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_PAIR_SWAP, data, - PHY848xx_CMDHDLR_MAX_ARGS); + 2, PHY84833_MB_PROCESS2); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); /* Prevent Phy from working in EEE and advertising it */ - rc 
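[Editor's note] The reworked bnx2x_84833_cmd_hdlr() above threads a "process" mode through the mailbox exchange: modes 1/2 write the argument registers, mode 3 reads results back, and a stale CMD_COMPLETE_* status is always cleared so the next command does not find the mailbox wedged. The core poll-with-timeout looks roughly like this; register addresses, status values and the my_mdio_* helpers are all assumed:

#include <linux/delay.h>
#include <linux/errno.h>

#define MY_CMD_HDLR_STATUS		0x400d	/* register address assumed */
#define MY_STATUS_CMD_OPEN_FOR_CMDS	0x0010	/* status values assumed */
#define MY_STATUS_CMD_COMPLETE_PASS	0x0020
#define MY_STATUS_CMD_COMPLETE_ERROR	0x0030
#define MY_STATUS_CMD_CLEAR_COMPLETE	0x0080

struct my_phy;				/* opaque device handle */
void my_mdio_read(struct my_phy *phy, u16 reg, u16 *val);
void my_mdio_write(struct my_phy *phy, u16 reg, u16 val);

static int my_mbox_wait_ready(struct my_phy *phy, int max_polls)
{
	u16 val = 0;
	int i;

	for (i = 0; i < max_polls; i++) {
		my_mdio_read(phy, MY_CMD_HDLR_STATUS, &val);
		if (val == MY_STATUS_CMD_OPEN_FOR_CMDS)
			return 0;
		usleep_range(1000, 2000);
	}

	/* On timeout, clear a stale completion so the next command
	 * starts from a clean mailbox.
	 */
	if (val == MY_STATUS_CMD_COMPLETE_PASS ||
	    val == MY_STATUS_CMD_COMPLETE_ERROR)
		my_mdio_write(phy, MY_CMD_HDLR_STATUS,
			      MY_STATUS_CMD_CLEAR_COMPLETE);
	return -ETIMEDOUT;
}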
= bnx2x_848xx_cmd_hdlr(phy, params, - PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); if (rc) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; @@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - rc = bnx2x_848xx_cmd_hdlr(phy, params, - PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); if (rc) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; @@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, cmd_args[3] = PHY84833_CONSTANT_LATENCY; rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, cmd_args, - PHY848xx_CMDHDLR_MAX_ARGS); + 4, PHY84833_MB_PROCESS1); if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } @@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + /* Additional settings for jumbo packets in 1000BASE-T mode */ + /* Allow rx extended length */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_AUX_CTRL, &val); + val |= 0x4000; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_AUX_CTRL, val); + /* TX FIFO Elasticity LSB */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val); + val |= 0x1; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val); + /* TX FIFO Elasticity MSB */ + /* Enable expansion register 0x46 (Pattern Generator status) */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46); + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val); + val |= 0x4000; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val); + } + if (bnx2x_is_8483x_8485x(phy)) { /* Bring PHY out of super isolate mode as the final step. 
*/ bnx2x_cl45_read_and_write(bp, phy, @@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, return link_up; } +static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len) +{ + int status = 0; + u32 num; + + num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) | + ((raw_ver & 0xF000) >> 12); + status = bnx2x_3_seq_format_ver(num, str, len); + return status; +} + static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) { int status = 0; @@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 0x0); } else { + /* LED 1 OFF */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0); + + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* LED 2 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + /* LED 3 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + } } break; case LED_MODE_FRONT_PANEL_OFF: @@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_REG_8481_SIGNAL_MASK, 0x0); } + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* LED 2 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + /* LED 3 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + } } break; case LED_MODE_ON: @@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, params->port*4, NIG_MASK_MI_INT); } + } + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* Tell LED3 to constant on */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (2<<6); /* A83B[8:6]= 2 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); + } else { bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_SIGNAL_MASK, @@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_REG_8481_LINK_SIGNAL, val); if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x18); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x06); + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { /* Restore LED4 source to external link, * and re-enable interrupts. 
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = { .read_status = (read_status_t)bnx2x_848xx_read_status, .link_reset = (link_reset_t)bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver, .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func @@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars) if (CHIP_IS_E3(bp)) { struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); - if ((phy->supported & SUPPORTED_20000baseKR2_Full) && - (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == SPEED_20000)) bnx2x_check_kr2_wa(params, vars, phy); bnx2x_check_over_curr(params, vars); if (vars->rx_tx_asic_rst) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6c4e3a69976f..2bf9c871144f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -5280,14 +5280,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; - u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); + u32 cid = echo & BNX2X_SWCID_MASK; struct bnx2x_vlan_mac_obj *vlan_mac_obj; /* Always push next commands out, don't wait here */ __set_bit(RAMROD_CONT, &ramrod_flags); - switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) - >> BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) @@ -5308,8 +5308,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, bnx2x_handle_mcast_eqe(bp); return; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } @@ -5478,9 +5477,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) goto next_spqe; } - /* elem CID originates from FW; actually LE */ - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ @@ -5503,6 +5499,10 @@ static void bnx2x_eq_int(struct bnx2x *bp) * we may want to verify here that the bp state is * HALTING */ + + /* elem CID originates from FW; actually LE */ + cid = SW_CID(elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_SP, "got delete ramrod for MULTI[%d]\n", cid); @@ -5596,10 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPENING_WAIT4_PORT): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - cid = elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", - cid); + SW_CID(elem->message.data.eth_event.echo)); rss_raw->clear_pending(rss_raw); break; @@ -5684,7 +5682,7 @@ static void bnx2x_sp_task(struct work_struct *work) if (status & BNX2X_DEF_SB_IDX) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - if (FCOE_INIT(bp) && + if (FCOE_INIT(bp) && (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* Prevent local bottom-halves from running as * we are going to change the local NAPI list. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 4dead49bd5cb..a43dea259b12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 #define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 +/* BCM84858 only */ +#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000 /* BCM84833 only */ #define MDIO_84833_TOP_CFG_FW_REV 0x400f @@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/ #define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox Process */ +#define PHY84833_MB_PROCESS1 1 +#define PHY84833_MB_PROCESS2 2 +#define PHY84833_MB_PROCESS3 3 /* Mailbox status set used by 84858 only */ #define PHY84858_STATUS_CMD_RECEIVED 0x0001 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9d027348cd09..632daff117d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); /* Always push next commands out, don't wait here */ set_bit(RAMROD_CONT, &ramrod_flags); - switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, &ramrod_flags); @@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, &ramrod_flags); break; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } if (rc < 0) @@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) switch (opcode) { case EVENT_RING_OPCODE_CFC_DEL: - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); + cid = SW_CID(elem->message.data.cfc_del_event.cid); DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: case EVENT_RING_OPCODE_RSS_UPDATE_RULES: - cid = (elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK); + cid = SW_CID(elem->message.data.eth_event.echo); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_VF_FLR: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 1374e5394a79..bfae300cf25f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2187,8 +2187,10 @@ void 
bnx2x_vf_mbx_schedule(struct bnx2x *bp, /* Update VFDB with current message and schedule its handling */ mutex_lock(&BP_VFDB(bp)->event_mutex); - BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; - BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = + le32_to_cpu(vfpf_event->msg_addr_hi); + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = + le32_to_cpu(vfpf_event->msg_addr_lo); BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); mutex_unlock(&BP_VFDB(bp)->event_mutex); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index df835f5e46d8..82f191382989 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_RX_DMA_OFFSET NET_SKB_PAD #define BNXT_RX_COPY_THRESH 256 -#define BNXT_TX_PUSH_THRESH 92 +#define BNXT_TX_PUSH_THRESH 164 enum board_idx { BCM57301, @@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { - struct tx_push_bd *push = txr->tx_push; - struct tx_bd *tx_push = &push->txbd1; - struct tx_bd_ext *tx_push1 = &push->txbd2; - void *pdata = tx_push1 + 1; - int j; + struct tx_push_buffer *tx_push_buf = txr->tx_push; + struct tx_push_bd *tx_push = &tx_push_buf->push_bd; + struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void *pdata = tx_push_buf->data; + u64 *end; + int j, push_len; /* Set COAL_NOW to be ready quickly for the next push */ tx_push->tx_bd_len_flags_type = @@ -247,6 +248,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + skb_copy_from_linear_data(skb, pdata, len); pdata += len; for (j = 0; j < last_frag; j++) { @@ -261,22 +266,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) pdata += skb_frag_size(frag); } - memcpy(txbd, tx_push, sizeof(*txbd)); + txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; + txbd->tx_bd_haddr = txr->data_mapping; prod = NEXT_TX(prod); txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; memcpy(txbd, tx_push1, sizeof(*txbd)); prod = NEXT_TX(prod); - push->doorbell = + tx_push->doorbell = cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); txr->tx_prod = prod; netdev_tx_sent_queue(txq, skb->len); - __iowrite64_copy(txr->tx_doorbell, push, - (length + sizeof(*push) + 8) / 8); + push_len = (length + sizeof(*tx_push) + 7) / 8; + if (push_len > 16) { + __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); + __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1, + push_len - 16); + } else { + __iowrite64_copy(txr->tx_doorbell, tx_push_buf, + push_len); + } tx_buf->is_push = 1; - goto tx_done; } @@ -1490,10 +1502,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) last = tx_buf->nr_frags; j += 2; - for (k = 0; k < last; k++, j = NEXT_TX(j)) { + for (k = 0; k < last; k++, j++) { + int ring_idx = j & bp->tx_ring_mask; skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; - tx_buf = &txr->tx_buf_ring[j]; + tx_buf = &txr->tx_buf_ring[ring_idx]; dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), @@ -1752,7 +1765,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + bp->tx_push_thresh); - if (push_size > 128) { + if (push_size > 
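[Editor's note] The bnxt TX-push rework above bounds the inline doorbell write: up to 16 qwords go out in a single __iowrite64_copy(), anything longer is split into a 16-qword burst plus a tail copy. A generic sketch of splitting a large MMIO copy; the burst limit is assumed, and the real driver writes the tail at its own doorbell-specific offset rather than contiguously:

#include <linux/io.h>
#include <linux/types.h>

#define MY_PUSH_BURST_QWORDS	16	/* single-burst limit, assumed */

static void my_doorbell_push(void __iomem *db, const u64 *buf, size_t qwords)
{
	if (qwords > MY_PUSH_BURST_QWORDS) {
		__iowrite64_copy(db, buf, MY_PUSH_BURST_QWORDS);
		__iowrite64_copy(db + MY_PUSH_BURST_QWORDS * sizeof(u64),
				 buf + MY_PUSH_BURST_QWORDS,
				 qwords - MY_PUSH_BURST_QWORDS);
	} else {
		__iowrite64_copy(db, buf, qwords);
	}
}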
256) { push_size = 0; bp->tx_push_thresh = 0; } @@ -1771,7 +1784,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) return rc; if (bp->tx_push_size) { - struct tx_bd *txbd; dma_addr_t mapping; /* One pre-allocated DMA buffer to backup @@ -1785,13 +1797,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) if (!txr->tx_push) return -ENOMEM; - txbd = &txr->tx_push->txbd1; - mapping = txr->tx_push_mapping + sizeof(struct tx_push_bd); - txbd->tx_bd_haddr = cpu_to_le64(mapping); + txr->data_mapping = cpu_to_le64(mapping); - memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); + memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } ring->queue_id = bp->q_info[j].queue_id; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -3406,7 +3416,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); req.ring_type = ring_type; req.ring_id = cpu_to_le16(ring->fw_ring_id); @@ -4545,20 +4555,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp) if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && link_info->force_pause_setting != link_info->req_flow_ctrl) update_pause = true; - if (link_info->req_duplex != link_info->duplex_setting) - update_link = true; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; if (link_info->req_link_speed != link_info->force_link_speed) update_link = true; + if (link_info->req_duplex != link_info->duplex_setting) + update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; if (link_info->advertising != link_info->auto_link_speeds) update_link = true; - if (link_info->req_link_speed != link_info->auto_link_speed) - update_link = true; } if (update_link) @@ -4635,7 +4643,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (link_re_init) { rc = bnxt_update_phy_setting(bp); if (rc) - goto open_err; + netdev_warn(bp->dev, "failed to update phy settings\n"); } if (irq_re_init) { @@ -4653,6 +4661,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Enable TX queues */ bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); + bnxt_update_link(bp, true); return 0; @@ -4819,8 +4828,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } @@ -5671,22 +5678,16 @@ static int bnxt_probe_phy(struct bnxt *bp) } /*initialize the ethool setting copy with NVM settings */ - if (BNXT_AUTO_MODE(link_info->auto_mode)) - link_info->autoneg |= BNXT_AUTONEG_SPEED; - - if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { - if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH) - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + link_info->autoneg = BNXT_AUTONEG_SPEED | + BNXT_AUTONEG_FLOW_CTRL; + link_info->advertising = link_info->auto_link_speeds; link_info->req_flow_ctrl = link_info->auto_pause_setting; - } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { + } else { + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_duplex = link_info->duplex_setting; link_info->req_flow_ctrl = 
link_info->force_pause_setting; } - link_info->req_duplex = link_info->duplex_setting; - if (link_info->autoneg & BNXT_AUTONEG_SPEED) - link_info->req_link_speed = link_info->auto_link_speed; - else - link_info->req_link_speed = link_info->force_link_speed; - link_info->advertising = link_info->auto_link_speeds; snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1], diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 8af3ca8efcef..2be51b332652 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext { #define BNXT_NUM_TESTS(bp) 0 -#define BNXT_DEFAULT_RX_RING_SIZE 1023 -#define BNXT_DEFAULT_TX_RING_SIZE 512 +#define BNXT_DEFAULT_RX_RING_SIZE 511 +#define BNXT_DEFAULT_TX_RING_SIZE 511 #define MAX_TPA 64 @@ -523,10 +523,16 @@ struct bnxt_ring_struct { struct tx_push_bd { __le32 doorbell; - struct tx_bd txbd1; + __le32 tx_bd_len_flags_type; + u32 tx_bd_opaque; struct tx_bd_ext txbd2; }; +struct tx_push_buffer { + struct tx_push_bd push_bd; + u32 data[25]; +}; + struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; @@ -538,8 +544,9 @@ struct bnxt_tx_ring_info { dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; - struct tx_push_bd *tx_push; + struct tx_push_buffer *tx_push; dma_addr_t tx_push_mapping; + __le64 data_mapping; #define BNXT_DEV_STATE_CLOSING 0x1 u32 dev_state; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 922b898e7a32..3238817dfd5f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) speed_mask |= SUPPORTED_2500baseX_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) speed_mask |= SUPPORTED_10000baseT_Full; - /* TODO: support 25GB, 50GB with different cable type */ - if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) - speed_mask |= SUPPORTED_20000baseMLD2_Full | - SUPPORTED_20000baseKR2_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= SUPPORTED_40000baseKR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseLR4_Full; + speed_mask |= SUPPORTED_40000baseCR4_Full; return speed_mask; } @@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) speed_mask |= ADVERTISED_2500baseX_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) speed_mask |= ADVERTISED_10000baseT_Full; - /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/ - if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) - speed_mask |= ADVERTISED_20000baseMLD2_Full | - ADVERTISED_20000baseKR2_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= ADVERTISED_40000baseKR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseLR4_Full; + speed_mask |= ADVERTISED_40000baseCR4_Full; return speed_mask; } @@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) u16 ethtool_speed; cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (link_info->auto_link_speeds) cmd->supported |= SUPPORTED_Autoneg; - if (BNXT_AUTO_MODE(link_info->auto_mode)) { + if (link_info->autoneg) { cmd->advertising = bnxt_fw_to_ethtool_advertised_spds(link_info); cmd->advertising |= 
ADVERTISED_Autoneg; @@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; } - if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) { if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) { cmd->advertising |= ADVERTISED_Pause; - cmd->supported |= SUPPORTED_Pause; } else { cmd->advertising |= ADVERTISED_Asym_Pause; - cmd->supported |= SUPPORTED_Asym_Pause; if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) cmd->advertising |= ADVERTISED_Pause; } - } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { - if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) == - BNXT_LINK_PAUSE_BOTH) { - cmd->supported |= SUPPORTED_Pause; - } else { - cmd->supported |= SUPPORTED_Asym_Pause; - if (link_info->force_pause_setting & - BNXT_LINK_PAUSE_RX) - cmd->supported |= SUPPORTED_Pause; - } } cmd->port = PORT_NONE; @@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) if (advertising & ADVERTISED_10000baseT_Full) fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + if (advertising & ADVERTISED_40000baseCR4_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; + return fw_speed_mask; } @@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; - link_info->autoneg &= ~BNXT_AUTONEG_SPEED; + link_info->autoneg = 0; link_info->advertising = 0; } @@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev, if (BNXT_VF(bp)) return; - epause->autoneg = !!(link_info->auto_pause_setting & - BNXT_LINK_PAUSE_BOTH); + epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); } @@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev, return rc; if (epause->autoneg) { + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) + return -EINVAL; + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; } else { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b15a60d787c7..d7e01a74e927 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work) } /* Link UP/DOWN event */ - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && - (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { + if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { phy_mac_interrupt(priv->phydev, !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 0d775964b060..457c3bc8cfff 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; + priv->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9293675df7ba..3010080cfeee 100644 --- 
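[Editor's note] The bnxt_set_pauseparam() hunk above adds a sanity check: autonegotiated flow control is only meaningful when speed autoneg is already enabled, so the request is rejected with -EINVAL instead of silently accepted. A stripped-down sketch with assumed flag values and a hypothetical link-info struct:

#include <linux/ethtool.h>
#include <linux/errno.h>

#define MY_AUTONEG_SPEED	BIT(0)	/* flag values assumed */
#define MY_AUTONEG_FLOW_CTRL	BIT(1)

struct my_link_info { u32 autoneg; };

static int my_set_pauseparam(struct my_link_info *link,
			     const struct ethtool_pauseparam *epause)
{
	if (epause->autoneg) {
		/* Pause autoneg requires speed autoneg to be on;
		 * reject rather than silently ignore.
		 */
		if (!(link->autoneg & MY_AUTONEG_SPEED))
			return -EINVAL;
		link->autoneg |= MY_AUTONEG_FLOW_CTRL;
	} else {
		link->autoneg &= ~MY_AUTONEG_FLOW_CTRL;
	}
	return 0;
}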
a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, return ret; } +static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) +{ + /* Check if we will never have enough descriptors, + * as gso_segs can be more than current ring size + */ + return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; +} + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); /* Use GSO to workaround all TSO packets that meet HW bug conditions @@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * vlan encapsulated. */ if (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - return tg3_tso_bug(tp, tnapi, txq, skb); + skb->protocol == htons(ETH_P_8021AD)) { + if (tg3_tso_bug_gso_check(tnapi, skb)) + return tg3_tso_bug(tp, tnapi, txq, skb); + goto drop; + } if (!skb_is_gso_v6(skb)) { if (unlikely((ETH_HLEN + hdr_len) > 80) && - tg3_flag(tp, TSO_BUG)) - return tg3_tso_bug(tp, tnapi, txq, skb); - + tg3_flag(tp, TSO_BUG)) { + if (tg3_tso_bug_gso_check(tnapi, skb)) + return tg3_tso_bug(tp, tnapi, txq, skb); + goto drop; + } ip_csum = iph->check; ip_tot_len = iph->tot_len; iph->check = 0; @@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (would_hit_hwbug) { tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - if (mss) { + if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { /* If it's a TSO packet, do GSO instead of * allocating and copying to a large linear SKB */ @@ -12016,7 +12029,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, int ret; u32 offset, len, b_offset, odd_len; u8 *buf; - __be32 start, end; + __be32 start = 0, end; if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 04b0d16b210e..95bc470ae441 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) if (!list_empty(&rxf->ucast_pending_add_q)) { mac = list_first_entry(&rxf->ucast_pending_add_q, struct bna_mac, qe); - list_add_tail(&mac->qe, &rxf->ucast_active_q); + list_move_tail(&mac->qe, &rxf->ucast_active_q); bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); return 1; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9d9984a87d42..50c94104f19c 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *phy_node; const struct macb_config *macb_config = NULL; - struct clk *pclk, *hclk, *tx_clk; + struct clk *pclk, *hclk = NULL, *tx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b89504405b72..34d269cd5579 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct lio *lio = container_of(ptp, struct lio, ptp_info); 
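[Editor's note] tg3_tso_bug_gso_check() above guards the software-GSO fallback: if segmenting the packet could emit more segments than the ring can ever hold, the packet is dropped up front instead of stalling the queue forever. A minimal restatement; the divisor mirrors tg3's headroom heuristic (several descriptors per segment):

#include <linux/skbuff.h>

static bool my_gso_fits_ring(struct sk_buff *skb, u32 tx_pending)
{
	/* gso_segs can exceed any fixed ring size; check before
	 * committing to the GSO workaround path.
	 */
	return skb_shinfo(skb)->gso_segs < tx_pending / 3;
}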
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; @@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, ns += lio->ptp_adjust; spin_unlock_irqrestore(&lio->ptp_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -1685,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); /* droq creation and local register settings. */ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val == -1) + if (ret_val < 0) return ret_val; if (ret_val == 1) { @@ -2526,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct, octeon_swap_8B_data(&resp->timestamp, 1); - if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { struct skb_shared_hwtstamps ts; u64 ns = resp->timestamp; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 4dba86eaa045..174072b3740b 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct, create_droq_fail: octeon_delete_droq(oct, q_no); - return -1; + return -ENOMEM; } diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 688828865c48..34e9acea8747 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -116,6 +116,15 @@ #define NIC_PF_INTR_ID_MBOX0 8 #define NIC_PF_INTR_ID_MBOX1 9 +/* Minimum FIFO level before all packets for the CQ are dropped + * + * This value ensures that once a packet has been "accepted" + * for reception it will not get dropped due to non-availability + * of CQ descriptor. An errata in HW mandates this value to be + * atleast 0x100. 
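[Editor's note] Note the operator fix in the liquidio handle_timestamp() hunk above: "tx_flags | SKBTX_IN_PROGRESS" is non-zero for every packet, so the old condition always took the timestamp path; testing bit membership needs '&'. A compact illustration:

#include <linux/skbuff.h>

static bool my_wants_tx_tstamp(struct sk_buff *skb)
{
	/* '&' tests the bit; '|' would make this always true. */
	return (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0;
}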
+ */ +#define NICPF_CQM_MIN_DROP_LEVEL 0x100 + /* Global timer for CQ timer thresh interrupts * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4dded90076c8..95f17f8cadac 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) static void nic_init_hw(struct nicpf *nic) { int i; + u64 cqm_cfg; /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic) /* Enable VLAN ethertype matching and stripping */ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); + + /* Check if HW expected value is higher (could be in future chips) */ + cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); + if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) + nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); } /* Channel parse index configuration */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index dd536be20193..afb10e326b4f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -21,7 +21,7 @@ #define NIC_PF_TCP_TIMER (0x0060) #define NIC_PF_BP_CFG (0x0080) #define NIC_PF_RRM_CFG (0x0088) -#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CQM_CFG (0x00A0) #define NIC_PF_CNM_CF (0x00A8) #define NIC_PF_CNM_STATUS (0x00B0) #define NIC_PF_CQ_AVG_CFG (0x00C0) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c24cb2a86a42..a009bc30dc4d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev, static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, - struct cmp_queue *cq, - struct cqe_rx_t *cqe_rx, int cqe_type) + struct cqe_rx_t *cqe_rx) { struct sk_buff *skb; struct nicvf *nic = netdev_priv(netdev); @@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, } /* Check for errors */ - err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); + err = nicvf_check_cqe_rx_errs(nic, cqe_rx); if (err && !cqe_rx->rb_cnt) return; @@ -682,8 +681,7 @@ loop: cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: - nicvf_rcv_pkt_handler(netdev, napi, cq, - cq_desc, CQE_TYPE_RX); + nicvf_rcv_pkt_handler(netdev, napi, cq_desc); work_done++; break; case CQE_TYPE_SEND: @@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev) /* Clear multiqset info */ nic->pnicvf = nic; - nic->sqs_count = 0; return 0; } @@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic) drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok; + drv_stats->rx_frames_ok = stats->rx_ucast_frames + + stats->rx_bcast_frames + + stats->rx_mcast_frames; drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun; drv_stats->tx_drops = stats->tx_drops; @@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nicvf_send_vf_struct(nic); + if (!pass1_silicon(nic->pdev)) + nic->hw_tso = true; + /* Check if this VF is in QS only mode */ if (nic->sqs_mode) return 0; @@ -1557,9 +1560,6 @@ static int 
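[Editor's note] The thunder nic_init_hw() hunk above enforces the erratum floor for the CQM drop level without clobbering a larger default that future silicon might program: read, compare, and only write when below the minimum. A sketch of the read-compare-write, with opaque my_* register accessors assumed:

#include <linux/types.h>

#define MY_CQM_CFG		0x00A0	/* register offset from the hunk */
#define MY_CQM_MIN_DROP_LEVEL	0x100	/* erratum-mandated floor */

struct my_nic;				/* opaque device handle */
u64 my_reg_read(struct my_nic *nic, u64 offset);
void my_reg_write(struct my_nic *nic, u64 offset, u64 val);

static void my_enforce_min_drop_level(struct my_nic *nic)
{
	u64 cfg = my_reg_read(nic, MY_CQM_CFG);

	/* Raise to the floor, never lower a bigger HW default. */
	if (cfg < MY_CQM_MIN_DROP_LEVEL)
		my_reg_write(nic, MY_CQM_CFG, MY_CQM_MIN_DROP_LEVEL);
}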
nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; - if (!pass1_silicon(nic->pdev)) - nic->hw_tso = true; - netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d0d1b5490061..767347b1f631 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) } /* Check for errors in the receive cmp.queue entry */ -int nicvf_check_cqe_rx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { struct nicvf_hw_stats *stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; - if (!cqe_rx->err_level && !cqe_rx->err_opcode) { - drv_stats->rx_frames_ok++; + if (!cqe_rx->err_level && !cqe_rx->err_opcode) return 0; - } if (netif_msg_rx_err(nic)) netdev_err(nic->netdev, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index c5030a7f213a..6673e1133523 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, /* Stats */ void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); -int nicvf_check_cqe_rx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx); #endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index ee04caa6c4d8..a89721fad633 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable) return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); } +static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val) +{ + char tok[len + 1]; + + memcpy(tok, s, len); + tok[len] = 0; + return kstrtouint(strim(tok), base, val); +} + +static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val) +{ + char tok[len + 1]; + + memcpy(tok, s, len); + tok[len] = 0; + return kstrtou16(strim(tok), base, val); +} + /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read @@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) return ret; } - ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); + ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk); if (ret) return ret; - ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); + ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk); if (ret) return ret; - ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); + ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk); if (ret) return ret; - ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); + ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc); if (ret) return ret; - ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); + ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing); if (ret) return ret; memcpy(p->sn, vpd.sn_data, SERNUM_LEN); @@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) } else { p->port_type[0] = hex_to_bin(vpd.port0_data[0]); p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); + ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16, + &p->xauicfg[0]); if (ret) return ret; - ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); + ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16, + &p->xauicfg[1]); if (ret) return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a8dda635456d..06bc2d2e7a73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 1671fa3332c2..7ba6d530b0c0 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.3.0.12" +#define DRV_VERSION "2.3.0.20" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1ffd1050860b..1fdf5fe12a95 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; - struct devcmd2_result *result = dc2c->result + dc2c->next_result; + struct devcmd2_result *result; + u8 color; unsigned int i; int delay, err; u32 fetch_index, new_posted; @@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, if 
(dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return 0; + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + for (delay = 0; delay < wait; delay++) { - if (result->color == dc2c->color) { - dc2c->next_result++; - if (dc2c->next_result == dc2c->result_size) { - dc2c->next_result = 0; - dc2c->color = dc2c->color ? 0 : 1; - } + if (result->color == color) { if (result->error) { err = result->error; if (err != ERR_ECMDUNKNOWN || diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index cf94b72dbacd..48d91941408d 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -128,7 +128,6 @@ struct board_info { struct resource *data_res; struct resource *addr_req; /* resources requested */ struct resource *data_req; - struct resource *irq_res; int irq_wake; @@ -1300,22 +1299,16 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); - unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); - /* If there is no IRQ type specified, default to something that - * may work, and tell the user that this is a problem */ - - if (irqflags == IRQF_TRIGGER_NONE) - irqflags = irq_get_trigger_type(dev->irq); - - if (irqflags == IRQF_TRIGGER_NONE) + /* If there is no IRQ type specified, tell the user that this is a + * problem + */ + if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); - irqflags |= IRQF_SHARED; - /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, + dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts @@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev) db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (db->addr_res == NULL || db->data_res == NULL || - db->irq_res == NULL) { - dev_err(db->dev, "insufficient resources\n"); + if (!db->addr_res || !db->data_res) { + dev_err(db->dev, "insufficient resources addr=%p data=%p\n", + db->addr_res, db->data_res); ret = -ENOENT; goto out; } + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + dev_err(db->dev, "interrupt resource unavailable: %d\n", + ndev->irq); + ret = ndev->irq; + goto out; + } + db->irq_wake = platform_get_irq(pdev, 1); if (db->irq_wake >= 0) { dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); @@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev) /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->io_addr; - ndev->irq = db->irq_res->start; /* ensure at least we have a default set of IO routines */ dm9000_set_io(db, iosize); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index cf837831304b..f9751294ece7 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ 
b/drivers/net/ethernet/emulex/benet/be.h @@ -531,6 +531,7 @@ struct be_adapter { struct delayed_work be_err_detection_work; u8 err_flags; + bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */ u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 241819b36ca7..6d9a8d78e8ad 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -622,10 +622,13 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) -#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) +#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS | \ + BE_IF_FLAGS_UNTAGGED) -#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) +#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_ALL_PROMISCUOUS) /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f99de3657ce3..d1cf1274fc2f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; +#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \ + BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS) + static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; @@ -3537,7 +3542,7 @@ static int be_enable_if_filters(struct be_adapter *adapter) { int status; - status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON); if (status) return status; @@ -3857,8 +3862,7 @@ static int be_vfs_if_create(struct be_adapter *adapter) int status; /* If a FW profile exists, then cap_flags are updated */ - cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; + cap_flags = BE_VF_IF_EN_FLAGS; for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { @@ -3874,10 +3878,8 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | - BE_IF_FLAGS_PASS_L3L4_ERRORS); + /* PF should enable IF flags during proxy if_create call */ + en_flags = cap_flags & BE_VF_IF_EN_FLAGS; status = be_cmd_if_create(adapter, cap_flags, en_flags, &vf_cfg->if_handle, vf + 1); if (status) @@ -4968,6 +4970,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); + if (adapter->pcicfg && adapter->pcicfg_mapped) + pci_iounmap(adapter->pdev, adapter->pcicfg); } static int db_bar(struct be_adapter *adapter) @@ -5019,8 +5023,10 @@ static int be_map_pci_bars(struct be_adapter *adapter) if (!addr) goto pci_map_err; adapter->pcicfg = addr; + adapter->pcicfg_mapped = true; } else { adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + adapter->pcicfg_mapped = false; } } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 62fa136554ac..41b010645100 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1265,7 +1265,6 @@ 
static int ethoc_remove(struct platform_device *pdev) if (priv->mdio) { mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } if (priv->clk) diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 48ecbc8aaaea..b423ad380b6a 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" depends on OF_IRQ && OF_NET + depends on HAS_IOMEM ---help--- Simple LAN device for debug or management purposes. Device supports interrupts for RX and TX(completion). diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 4097c58d17a7..cbe21dc7e37e 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -4,6 +4,9 @@ obj-$(CONFIG_FEC) += fec.o fec-objs :=fec_main.o fec_ptp.o +CFLAGS_fec_main.o := -D__CHECK_ENDIAN__ +CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__ + obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 99d33e2d35e6..2106d72c91dc 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -19,8 +19,7 @@ #include <linux/timecounter.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -190,28 +189,45 @@ /* * Define the buffer descriptor structure. + * + * Evidently, ARM SoCs have the FEC block generated in a + * little endian mode so adjust endianness accordingly. 
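[Editor's note: the fec.h hunk above hides the descriptor byte order behind driver-local fec16/fec32 wrappers, and the Makefile hunk enables sparse's -D__CHECK_ENDIAN__ so any unconverted descriptor access is flagged. A minimal standalone sketch of that pattern follows; the "mydev" naming, the MYDEV_DESC_LITTLE_ENDIAN switch, and struct mydev_desc are hypothetical, illustrating the technique rather than this driver's API.]

#include <linux/types.h>
#include <asm/byteorder.h>

/* Pick the conversion set once, where the descriptor layout is chosen.
 * Real hardware fixes this at IP-generation time, as with the FEC block.
 */
#ifdef MYDEV_DESC_LITTLE_ENDIAN
#define __mydev16		__le16
#define cpu_to_mydev16		cpu_to_le16
#define mydev16_to_cpu		le16_to_cpu
#else
#define __mydev16		__be16
#define cpu_to_mydev16		cpu_to_be16
#define mydev16_to_cpu		be16_to_cpu
#endif

struct mydev_desc {
	__mydev16 len;		/* buffer length, device byte order */
	__mydev16 flags;	/* status bits, device byte order */
};

static inline void mydev_desc_fill(struct mydev_desc *d, u16 len, u16 flags)
{
	/* Every store goes through one conversion point; with
	 * __CHECK_ENDIAN__, sparse rejects a bare "d->len = len".
	 */
	d->len = cpu_to_mydev16(len);
	d->flags = cpu_to_mydev16(flags);
}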
*/ -#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +#if defined(CONFIG_ARM) +#define fec32_to_cpu le32_to_cpu +#define fec16_to_cpu le16_to_cpu +#define cpu_to_fec32 cpu_to_le32 +#define cpu_to_fec16 cpu_to_le16 +#define __fec32 __le32 +#define __fec16 __le16 + struct bufdesc { - unsigned short cbd_datlen; /* Data length */ - unsigned short cbd_sc; /* Control and status info */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_datlen; /* Data length */ + __fec16 cbd_sc; /* Control and status info */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #else +#define fec32_to_cpu be32_to_cpu +#define fec16_to_cpu be16_to_cpu +#define cpu_to_fec32 cpu_to_be32 +#define cpu_to_fec16 cpu_to_be16 +#define __fec32 __be32 +#define __fec16 __be16 + struct bufdesc { - unsigned short cbd_sc; /* Control and status info */ - unsigned short cbd_datlen; /* Data length */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_sc; /* Control and status info */ + __fec16 cbd_datlen; /* Data length */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #endif struct bufdesc_ex { struct bufdesc desc; - unsigned long cbd_esc; - unsigned long cbd_prot; - unsigned long cbd_bdu; - unsigned long ts; - unsigned short res0[4]; + __fec32 cbd_esc; + __fec32 cbd_prot; + __fec32 cbd_bdu; + __fec32 ts; + __fec16 res0[4]; }; /* diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 502da6f48f95..41c81f6ec630 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev) bdp = txq->tx_bd_base; do { - pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", + pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", index, bdp == txq->cur_tx ? 'S' : ' ', bdp == txq->dirty_tx ? 
'H' : ' ', - bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, + fec16_to_cpu(bdp->cbd_sc), + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), txq->tx_skbuff[index]); bdp = fec_enet_get_nextdesc(bdp, fep, 0); index++; @@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp = fec_enet_get_nextdesc(bdp, fep, queue); ebdp = (struct bufdesc_ex *)bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); frag_len = skb_shinfo(skb)->frags[frag].size; @@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; @@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, goto dma_mapping_error; } - bdp->cbd_bufaddr = addr; - bdp->cbd_datlen = frag_len; - bdp->cbd_sc = status; + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(frag_len); + bdp->cbd_sc = cpu_to_fec16(status); } return bdp; @@ -445,8 +447,8 @@ dma_mapping_error: bdp = txq->cur_tx; for (i = 0; i < frag; i++) { bdp = fec_enet_get_nextdesc(bdp, fep, queue); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); + dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); } return ERR_PTR(-ENOMEM); } @@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Fill in a Tx ring entry */ bdp = txq->cur_tx; last_bdp = bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; /* Set buffer length and buffer pointer */ @@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ txq->tx_skbuff[index] = skb; - bdp->cbd_datlen = buflen; - bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = cpu_to_fec16(buflen); + bdp->cbd_bufaddr = cpu_to_fec32(addr); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); /* If this was the last BD in the ring, start at the beginning again. 
*/ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); @@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, unsigned int estatus = 0; dma_addr_t addr; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, return NETDEV_TX_BUSY; } - bdp->cbd_datlen = size; - bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = cpu_to_fec16(size); + bdp->cbd_bufaddr = cpu_to_fec32(addr); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) @@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } /* Handle the last BD specially */ @@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (is_last) { status |= BD_ENET_TX_INTR; if (fep->bufdesc_ex) - ebdp->cbd_esc |= BD_ENET_TX_INT; + ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, unsigned short status; unsigned int estatus = 0; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, } } - bdp->cbd_bufaddr = dmabuf; - bdp->cbd_datlen = hdr_len; + bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); + bdp->cbd_datlen = cpu_to_fec16(hdr_len); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) @@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev) /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else - bdp->cbd_sc = 0; + bdp->cbd_sc = cpu_to_fec16(0); bdp = fec_enet_get_nextdesc(bdp, fep, q); } /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); rxq->cur_rx = rxq->rx_bd_base; } @@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->tx_ring_size; i++) { /* Initialize the BD for every fragment in the page. 
*/ - bdp->cbd_sc = 0; + bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; } - bdp->cbd_bufaddr = 0; + bdp->cbd_bufaddr = cpu_to_fec32(0); bdp = fec_enet_get_nextdesc(bdp, fep, q); } /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); txq->dirty_tx = bdp; } } @@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev) */ if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + writel((__force u32)cpu_to_be32(temp_mac[0]), + fep->hwp + FEC_ADDR_LOW); + writel((__force u32)cpu_to_be32(temp_mac[1]), + fep->hwp + FEC_ADDR_HIGH); } /* Clear any outstanding interrupt. */ @@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) while (bdp != READ_ONCE(txq->cur_tx)) { /* Order the load of cur_tx and cbd_sc */ rmb(); - status = READ_ONCE(bdp->cbd_sc); + status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break; @@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; + if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) { bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); continue; @@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); skb_tstamp_tx(skb, &shhwtstamps); } @@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff if (off) skb_reserve(skb, fep->rx_align + 1 - off); - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); + if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { if (net_ratelimit()) netdev_err(ndev, "Rx DMA memory map failed\n"); return -ENOMEM; @@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, if (!new_skb) return false; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_cpu(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); if (!swap) @@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) */ bdp = rxq->cur_rx; - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { if (pkt_received >= budget) break; @@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* Process the incoming frame. 
*/ ndev->stats.rx_packets++; - pkt_len = bdp->cbd_datlen; + pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); @@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ndev->stats.rx_dropped++; goto rx_processing_done; } - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } @@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && + (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); @@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* Get receive timestamp from the skb */ if (fep->hwts_rx_en && fep->bufdesc_ex) - fec_enet_hwtstamp(fep, ebdp->ts, + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), skb_hwtstamps(skb)); if (fep->bufdesc_ex && (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { /* don't check it */ skb->ip_summed = CHECKSUM_UNNECESSARY; } else { @@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) napi_gro_receive(&fep->napi, skb); if (is_copybreak) { - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_device(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } else { @@ -1527,12 +1535,12 @@ rx_processing_done: /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); ebdp->cbd_prot = 0; ebdp->cbd_bdu = 0; } @@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) rxq->rx_skbuff[i] = NULL; if (skb) { dma_unmap_single(&fep->pdev->dev, - bdp->cbd_bufaddr, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); dev_kfree_skb(skb); @@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) } rxq->rx_skbuff[i] = skb; - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); } bdp = fec_enet_get_nextdesc(bdp, fep, queue); @@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct 
net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; err_alloc: @@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) if (!txq->tx_bounce[i]) goto err_alloc; - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; + bdp->cbd_sc = cpu_to_fec16(0); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_TX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); } bdp = fec_enet_get_nextdesc(bdp, fep, queue); @@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 623aa1c8ebc6..79a210aaf0bb 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_free; } + fman->dev = &of_dev->dev; + return fman; fman_node_put: @@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev) dev_set_drvdata(dev, fman); - fman->dev = dev; - dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); return 0; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 52e0091b4fb2..1ba359f17ec6 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) cbd_t __iomem *prev_bd; cbd_t __iomem *last_tx_bd; - last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1); /* get the current bd held in TBPTR and scan back from this point */ recheck_bd = curr_tbptr = (cbd_t __iomem *) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2aa7b401cc3b..b9ecf197ad11 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv) if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) priv->errata |= GFAR_ERRATA_12; + /* P2020/P1010 Rev 1; MPC8548 Rev 2 */ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || - ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ } #endif diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index a7139f588ad2..678f5018d0be 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link) goto failed; } /* Read MACID from CIS */ - for (i = 5; i < 11; i++) - dev->dev_addr[i] = buf[i]; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = buf[i + 5]; kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 74beb1867230..4ccc032633c4 
100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -25,6 +25,7 @@ config HIX5HD2_GMAC config HIP04_ETH tristate "HISILICON P04 Ethernet support" + depends on HAS_IOMEM # For MFD_SYSCON select MARVELL_PHY select MFD_SYSCON select HNS_MDIO diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index b3645297477e..3bfe36f9405b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = { static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - const char *ae_id = data; - if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) - return 1; - - return 0; + return hdev->dev->of_node == data; } -static struct hnae_ae_dev *find_ae(const char *ae_id) +static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) { struct device *dev; - WARN_ON(!ae_id); + WARN_ON(!ae_node); - dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); + dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); return dev ? cls_to_ae_dev(dev) : NULL; } @@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const char *ae_id, u32 port_id, + const struct device_node *ae_node, + u32 port_id, struct hnae_buf_ops *bops) { struct hnae_ae_dev *dev; @@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_id); + dev = find_ae(ae_node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 6ca94dc3dda3..1cbcb9fa3fb5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -524,8 +524,11 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) -struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, - u32 port_id, struct hnae_buf_ops *bops); +struct hnae_handle *hnae_get_handle(struct device *owner_dev, + const struct device_node *ae_node, + u32 port_id, + struct hnae_buf_ops *bops); + void hnae_put_handle(struct hnae_handle *handle); int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); void hnae_ae_unregister(struct hnae_ae_dev *dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 522b264866b4..d4f92ed322d6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, { int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); switch (loop) { + case MAC_INTERNALLOOP_PHY: + ret = 0; + break; case MAC_INTERNALLOOP_SERDES: ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); break; @@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, default: ret = -EINVAL; } + + if (!ret) + hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); + return ret; } @@ -847,6 +855,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + static atomic_t id = ATOMIC_INIT(-1); switch (dsaf_dev->dsaf_ver) { case AE_VERSION_1: @@ -858,6 +867,9 @@ int 
hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) default: break; } + + snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME, + (int)atomic_inc_return(&id)); ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c33bd06bd5c..38fc5be3870c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; - const char *name, *mode_str; + const char *mode_str; struct device_node *np = dsaf_dev->dev->of_node; if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) @@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_ver = AE_VERSION_2; - ret = of_property_read_string(np, "dsa_name", &name); - if (ret) { - dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret); - return ret; - } - strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE); - dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0'; - ret = of_property_read_string(np, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); @@ -238,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) } } +static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) +{ + u16 max_q_per_vf, max_vfn; + u32 q_id, q_num_per_port; + u32 mac_id; + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + return; + + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, + HNS_DSAF_COMM_SERVICE_NW_IDX, + &max_vfn, &max_q_per_vf); + q_num_per_port = max_vfn * max_q_per_vf; + + for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { + dsaf_set_dev_field(dsaf_dev, + DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_QID_M, + DSAFV2_SERDES_LBK_QID_S, + q_id); + q_id += q_num_per_port; + } +} + /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id @@ -699,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); } +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + return; + + dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_EN_B, !!en); +} + /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id @@ -1030,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) /* set promisc def queue id */ hns_dsaf_mix_def_qid_cfg(dsaf_dev); + /* set inner loopback queue id */ + hns_dsaf_inner_qid_cfg(dsaf_dev); + /* in non switch mode, set all port to access mode */ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 31c312f9826e..5fea226efaf3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -18,6 +18,7 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" +#define DSAF_DEVICE_NAME "dsaf" #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 @@ -416,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device 
*dsaf_dev, u32 en); +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index f0c4f9b09d5b..60d695daa471 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -134,6 +134,7 @@ #define DSAF_XGE_INT_STS_0_REG 0x1C0 #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 +#define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -857,6 +858,10 @@ #define PPEV2_CFG_RSS_TBL_4N3_S 24 #define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) +#define DSAFV2_SERDES_LBK_EN_B 8 +#define DSAFV2_SERDES_LBK_QID_S 0 +#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 0e30846a24f8..3f77ff77abbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_name, priv->port_id, NULL); + priv->ae_node, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = PTR_ERR(h); dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev) else priv->enet_ver = AE_VERSION_2; - ret = of_property_read_string(node, "ae-name", &priv->ae_name); - if (ret) - goto out_read_string_fail; + priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); + if (IS_ERR_OR_NULL(priv->ae_node)) { + ret = PTR_ERR(priv->ae_node); + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } ret = of_property_read_u32(node, "port-id", &priv->port_id); if (ret) - goto out_read_string_fail; + goto out_read_prop_fail; hns_init_mac_addr(ndev); @@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); -out_read_string_fail: +out_read_prop_fail: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 4b75270f014e..c68ab3d34fc2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -51,7 +51,7 @@ struct hns_nic_ops { }; struct hns_nic_priv { - const char *ae_name; + const struct device_node *ae_node; u32 enet_ver; u32 port_id; int phy_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3df22840fcd1..3c4a3bc31a89 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev, switch (loop) { case MAC_INTERNALLOOP_PHY: - if ((phy_dev) && (!phy_dev->is_c45)) + if ((phy_dev) && (!phy_dev->is_c45)) { ret = hns_nic_config_phy_loopback(phy_dev, 0x1); + ret |= h->dev->ops->set_loopback(h, loop, 0x1); + } break; case MAC_INTERNALLOOP_MAC: if ((h->dev->ops->set_loopback) && @@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, struct sk_buff *skb) { struct 
net_device *ndev; + struct hns_nic_priv *priv; struct hnae_ring *ring; struct netdev_queue *dev_queue; struct sk_buff *new_skb; @@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, char buff[33]; /* 32B data and the last character '\0' */ if (!ring_data) { /* Just for doing create frame*/ + ndev = skb->dev; + priv = netdev_priv(ndev); + frame_size = skb->len; memset(skb->data, 0xFF, frame_size); + if ((!AE_IS_VER1(priv->enet_ver)) && + (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) { + memcpy(skb->data, ndev->dev_addr, 6); + skb->data[5] += 0x1f; + } + frame_size &= ~1ul; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, @@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev, /* place data into test skb */ (void)skb_put(skb, size); + skb->dev = ndev; __lb_other_process(NULL, skb); skb->queue_mapping = NIC_LB_TEST_RING_ID; diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 1d5c3e16d8f4..3daf2d4a7ca0 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = { }; #endif -#ifdef CONFIG_EISA static struct eisa_device_id hp100_eisa_tbl[] = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ @@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = { { "" } /* Mandatory final entry ! */ }; MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); -#endif -#ifdef CONFIG_PCI static const struct pci_device_id hp100_pci_tbl[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, @@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = { {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); -#endif static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; @@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d) free_netdev(d); } -#ifdef CONFIG_EISA static int hp100_eisa_probe(struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); @@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = { .remove = hp100_eisa_remove, } }; -#endif -#ifdef CONFIG_PCI static int hp100_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = { .probe = hp100_pci_probe, .remove = hp100_pci_remove, }; -#endif /* * module section @@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void) err = hp100_isa_init(); if (err && err != -ENODEV) goto out; -#ifdef CONFIG_EISA err = eisa_driver_register(&hp100_eisa_driver); if (err && err != -ENODEV) goto out2; -#endif -#ifdef CONFIG_PCI err = pci_register_driver(&hp100_pci_driver); if (err && err != -ENODEV) goto out3; -#endif out: return err; out3: -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); out2: -#endif hp100_isa_cleanup(); goto out; } @@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void) static void __exit hp100_module_exit(void) { hp100_isa_cleanup(); -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); -#endif -#ifdef CONFIG_PCI pci_unregister_driver (&hp100_pci_driver); -#endif } module_init(hp100_module_init) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 335417b4756b..ebe60719e489 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ 
b/drivers/net/ethernet/ibm/ibmveth.c @@ -1166,7 +1166,10 @@ map_failed: if (!firmware_has_feature(FW_FEATURE_CMO)) netdev_err(netdev, "tx: unable to map xmit buffer\n"); adapter->tx_map_failed++; - skb_linearize(skb); + if (skb_linearize(skb)) { + netdev->stats.tx_dropped++; + goto out; + } force_bounce = 1; goto retry_bounce; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7d6570843723..6e9e16eee5d0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.cmd = REQUEST_CAPABILITY; crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_tx_entries_per_subcrq); + cpu_to_be64(adapter->req_tx_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); - crq.request_capability.number = cpu_to_be32(adapter->req_mtu); + crq.request_capability.number = cpu_to_be64(adapter->req_mtu); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { if (adapter->promisc_supported) { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(1); + crq.request_capability.number = cpu_to_be64(1); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(0); + crq.request_capability.number = cpu_to_be64(0); ibmvnic_send_crq(adapter, &crq); } @@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, switch (be16_to_cpu(crq->query_capability.capability)) { case MIN_TX_QUEUES: adapter->min_tx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_queues = %lld\n", adapter->min_tx_queues); break; case MIN_RX_QUEUES: adapter->min_rx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_queues = %lld\n", adapter->min_rx_queues); break; case MIN_RX_ADD_QUEUES: adapter->min_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_queues = %lld\n", adapter->min_rx_add_queues); break; case MAX_TX_QUEUES: adapter->max_tx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); 
netdev_dbg(netdev, "max_tx_queues = %lld\n", adapter->max_tx_queues); break; case MAX_RX_QUEUES: adapter->max_rx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_queues = %lld\n", adapter->max_rx_queues); break; case MAX_RX_ADD_QUEUES: adapter->max_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_queues = %lld\n", adapter->max_rx_add_queues); break; case MIN_TX_ENTRIES_PER_SUBCRQ: adapter->min_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", adapter->min_tx_entries_per_subcrq); break; case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->min_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", adapter->min_rx_add_entries_per_subcrq); break; case MAX_TX_ENTRIES_PER_SUBCRQ: adapter->max_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", adapter->max_tx_entries_per_subcrq); break; case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->max_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", adapter->max_rx_add_entries_per_subcrq); break; case TCP_IP_OFFLOAD: adapter->tcp_ip_offload = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "tcp_ip_offload = %lld\n", adapter->tcp_ip_offload); break; case PROMISC_SUPPORTED: adapter->promisc_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "promisc_supported = %lld\n", adapter->promisc_supported); break; case MIN_MTU: - adapter->min_mtu = be32_to_cpu(crq->query_capability.number); + adapter->min_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); break; case MAX_MTU: - adapter->max_mtu = be32_to_cpu(crq->query_capability.number); + adapter->max_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); break; case MAX_MULTICAST_FILTERS: adapter->max_multicast_filters = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_multicast_filters = %lld\n", adapter->max_multicast_filters); break; case VLAN_HEADER_INSERTION: adapter->vlan_header_insertion = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); if (adapter->vlan_header_insertion) netdev->features |= NETIF_F_HW_VLAN_STAG_TX; netdev_dbg(netdev, "vlan_header_insertion = %lld\n", @@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, break; case MAX_TX_SG_ENTRIES: adapter->max_tx_sg_entries = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", adapter->max_tx_sg_entries); break; case RX_SG_SUPPORTED: adapter->rx_sg_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "rx_sg_supported = %lld\n", adapter->rx_sg_supported); break; case OPT_TX_COMP_SUB_QUEUES: 
adapter->opt_tx_comp_sub_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", adapter->opt_tx_comp_sub_queues); break; case OPT_RX_COMP_QUEUES: adapter->opt_rx_comp_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", adapter->opt_rx_comp_queues); break; case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: adapter->opt_rx_bufadd_q_per_rx_comp_q = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", adapter->opt_rx_bufadd_q_per_rx_comp_q); break; case OPT_TX_ENTRIES_PER_SUBCRQ: adapter->opt_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", adapter->opt_tx_entries_per_subcrq); break; case OPT_RXBA_ENTRIES_PER_SUBCRQ: adapter->opt_rxba_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", adapter->opt_rxba_entries_per_subcrq); break; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 1242925ad34c..1a9993cc79b5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -319,10 +319,8 @@ struct ibmvnic_capability { u8 first; u8 cmd; __be16 capability; /* one of ibmvnic_capabilities */ + __be64 number; struct ibmvnic_rc rc; - __be32 number; /*FIX: should be __be64, but I'm getting the least - * significant word first - */ } __packed __aligned(8); struct ibmvnic_login { diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index fa593dd3efe1..3772f3ac956e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -83,6 +83,15 @@ config E1000E To compile this driver as a module, choose M here. The module will be called e1000e. +config E1000E_HWTS + bool "Support HW cross-timestamp on PCH devices" + default y + depends on E1000E && X86 + ---help--- + Say Y to enable hardware supported cross-timestamping on PCH + devices. The cross-timestamp is available through the PTP clock + driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE). 
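[Editor's note: the E1000E_HWTS help text above names PTP_SYS_OFFSET_PRECISE as the user-visible surface of the feature. A minimal user-space sketch of consuming it follows — it assumes the PHC is exposed as /dev/ptp0; drivers that do not implement getcrosststamp fail the ioctl with EOPNOTSUPP.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise p;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}
	memset(&p, 0, sizeof(p));
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &p)) {
		perror("PTP_SYS_OFFSET_PRECISE");	/* e.g. EOPNOTSUPP */
		close(fd);
		return 1;
	}
	/* Device time and system CLOCK_REALTIME captured at the same instant */
	printf("device   %lld.%09u\n", (long long)p.device.sec, p.device.nsec);
	printf("realtime %lld.%09u\n",
	       (long long)p.sys_realtime.sec, p.sys_realtime.nsec);
	close(fd);
	return 0;
}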
+ config IGB tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index f7c7804d79e5..0641c0098738 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -528,6 +528,11 @@ #define E1000_RXCW_C 0x20000000 /* Receive config */ #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +/* HH Time Sync */ +#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + #define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 25a0ad5102d6..e2ff3ef75d5d 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -26,6 +26,12 @@ #include "e1000.h" +#ifdef CONFIG_E1000E_HWTS +#include <linux/clocksource.h> +#include <linux/ktime.h> +#include <asm/tsc.h> +#endif + /** * e1000e_phc_adjfreq - adjust the frequency of the hardware clock * @ptp: ptp clock structure @@ -98,6 +104,78 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } +#ifdef CONFIG_E1000E_HWTS +#define MAX_HW_WAIT_COUNT (3) + +/** + * e1000e_phc_get_syncdevicetime - Callback given to timekeeping code reads system/device registers + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * + * Read device and system (ART) clock simultaneously and return the corrected + * clock values in ns. + **/ +static int e1000e_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)ctx; + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + int i; + u32 tsync_ctrl; + cycle_t dev_cycles; + cycle_t sys_cycles; + + tsync_ctrl = er32(TSYNCTXCTL); + tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC | + E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK; + ew32(TSYNCTXCTL, tsync_ctrl); + for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) { + udelay(1); + tsync_ctrl = er32(TSYNCTXCTL); + if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP) + break; + } + + if (i == MAX_HW_WAIT_COUNT) + return -ETIMEDOUT; + + dev_cycles = er32(SYSSTMPH); + dev_cycles <<= 32; + dev_cycles |= er32(SYSSTMPL); + spin_lock_irqsave(&adapter->systim_lock, flags); + *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles)); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + sys_cycles = er32(PLTSTMPH); + sys_cycles <<= 32; + sys_cycles |= er32(PLTSTMPL); + *system = convert_art_to_tsc(sys_cycles); + + return 0; +} + +/** + * e1000e_phc_getsynctime - Reads the current system/device cross timestamp + * @ptp: ptp clock structure + * @cts: structure containing timestamp + * + * Read device and system (ART) clock simultaneously and return the scaled + * clock values in ns. 
+ **/ +static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + + return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime, + adapter, NULL, xtstamp); +} +#endif/*CONFIG_E1000E_HWTS*/ + /** * e1000e_phc_gettime - Reads the current time from the hardware clock * @ptp: ptp clock structure @@ -236,6 +314,13 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) break; } +#ifdef CONFIG_E1000E_HWTS + /* CPU must have ART and GBe must be from Sunrise Point or greater */ + if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART)) + adapter->ptp_clock_info.getcrosststamp = + e1000e_phc_getcrosststamp; +#endif/*CONFIG_E1000E_HWTS*/ + INIT_DELAYED_WORK(&adapter->systim_overflow_work, e1000e_systim_overflow_work); diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 1d5e0b77062a..0cb4d365e5ad 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -245,6 +245,10 @@ #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ +#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ +#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ +#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bb4612c159fd..8f3b53e0dc46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7117,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work) i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) i40e_sync_udp_filters_subtask(pf); -#endif i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); @@ -8515,6 +8513,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) } #endif + +#if IS_ENABLED(CONFIG_VXLAN) /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8524,7 +8524,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8557,7 +8556,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -#endif } /** @@ -8569,7 +8567,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8592,9 +8589,10 @@ static void i40e_del_vxlan_port(struct 
net_device *netdev, netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif +#if IS_ENABLED(CONFIG_GENEVE) /** * i40e_add_geneve_port - Get notifications about GENEVE ports that come up * @netdev: This physical port's netdev @@ -8604,7 +8602,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev, static void i40e_add_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8639,7 +8636,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); -#endif } /** @@ -8651,7 +8647,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, static void i40e_del_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8677,8 +8672,8 @@ static void i40e_del_geneve_port(struct net_device *netdev, netdev_warn(netdev, "geneve port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 720516b0e8ee..47bd8b3145a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; + struct udphdr *oudph = NULL; + struct iphdr *oiph = NULL; u32 l4_tunnel = 0; if (skb->encapsulation) { diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b1de7afd4116..3ddf657bc10b 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme) } static inline void -jme_clear_pm(struct jme_adapter *jme) +jme_clear_pm_enable_wol(struct jme_adapter *jme) { jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); } +static inline void +jme_clear_pm_disable_wol(struct jme_adapter *jme) +{ + jwrite32(jme, JME_PMCS, PMCS_STMASK); +} + static int jme_reload_eeprom(struct jme_adapter *jme) { @@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev) struct jme_adapter *jme = netdev_priv(netdev); int rc; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); JME_NAPI_ENABLE(jme); tasklet_init(&jme->linkch_task, jme_link_change_tasklet, @@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme) static void jme_powersave_phy(struct jme_adapter *jme) { - if (jme->reg_pmcs) { + if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { jme_set_100m_half(jme); if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) jme_wait_link(jme); - jme_clear_pm(jme); + jme_clear_pm_enable_wol(jme); } else { jme_phy_off(jme); } @@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev, if (wol->wolopts & WAKE_MAGIC) jme->reg_pmcs |= PMCS_MFEN; - jwrite32(jme, JME_PMCS, jme->reg_pmcs); - device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); - return 0; } @@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_read = jme_mdio_read; jme->mii_if.mdio_write = jme_mdio_write; - 
jme_clear_pm(jme); - device_set_wakeup_enable(&pdev->dev, true); + jme_clear_pm_disable_wol(jme); + device_init_wakeup(&pdev->dev, true); jme_set_phyfifo_5level(jme); jme->pcirev = pdev->revision; @@ -3304,7 +3307,7 @@ jme_resume(struct device *dev) if (!netif_running(netdev)) return 0; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) jme_set_settings(netdev, &jme->old_ecmd); @@ -3312,13 +3315,14 @@ jme_resume(struct device *dev) jme_reset_phy_processor(jme); jme_phy_calibration(jme); jme_phy_setEA(jme); - jme_start_irq(jme); netif_device_attach(netdev); atomic_inc(&jme->link_changing); jme_reset_link(jme); + jme_start_irq(jme); + return 0; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a0c03834a2f7..55831188bc32 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (length <= 8 && (uintptr_t)data & 0x7) { /* Copy unaligned small data fragment to TSO header data area */ - memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, data, length); desc->buf_ptr = txq->tso_hdrs_dma - + txq->tx_curr_desc * TSO_HEADER_SIZE; + + tx_index * TSO_HEADER_SIZE; } else { /* Alignment is okay, map buffer and hand off to hardware */ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fabc8df40392..b0ae69f84493 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -11,28 +11,28 @@ * warranty of any kind, whether express or implied. */ -#include <linux/kernel.h> -#include <linux/netdevice.h> +#include <linux/clk.h> +#include <linux/cpu.h> #include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <linux/inetdevice.h> -#include <linux/mbus.h> -#include <linux/module.h> #include <linux/interrupt.h> -#include <linux/if_vlan.h> -#include <net/ip.h> -#include <net/ipv6.h> #include <linux/io.h> -#include <net/tso.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_address.h> #include <linux/phy.h> -#include <linux/clk.h> -#include <linux/cpu.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) @@ -370,9 +370,16 @@ struct mvneta_port { struct net_device *dev; struct notifier_block cpu_notifier; int rxq_def; + /* Protect the access to the percpu interrupt registers, + * ensuring that the configuration remains coherent. 
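This new lock, together with the is_stopped flag added below it, closes a race between the CPU hotplug notifier and the stop path; the mvneta_stop() and notifier hunks further down show the real code. Condensed into invented 'foo' names, the scheme looks like the sketch below. Note the sketch drops the lock before unregister_cpu_notifier(), which takes a mutex and may sleep:

	#include <linux/cpu.h>
	#include <linux/spinlock.h>

	struct foo_port {			/* names invented for illustration */
		spinlock_t lock;
		bool is_stopped;
		struct notifier_block cpu_notifier;
	};

	static int foo_cpu_notifier(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
	{
		struct foo_port *pp = container_of(nfb, struct foo_port,
						   cpu_notifier);

		spin_lock(&pp->lock);
		if (pp->is_stopped) {
			/* stop path owns the hardware now; refuse new work */
			spin_unlock(&pp->lock);
			return NOTIFY_OK;
		}
		/* ... rebalance queues/interrupts for the hotplugged CPU ... */
		spin_unlock(&pp->lock);
		return NOTIFY_OK;
	}

	static void foo_stop(struct foo_port *pp)
	{
		/* Flag first, under the lock, so a concurrently running
		 * notifier either finished before us or sees is_stopped.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		unregister_cpu_notifier(&pp->cpu_notifier);
		/* teardown can proceed without racing the notifier */
	}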
+ */ + spinlock_t lock; + bool is_stopped; /* Core clock */ struct clk *clk; + /* AXI clock */ + struct clk *clk_bus; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; @@ -1036,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) } } +static void mvneta_percpu_unmask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are unmasked, but actually only the ones + * mapped to this CPU will be unmasked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK_ALL | + MVNETA_TX_INTR_MASK_ALL | + MVNETA_MISCINTR_INTR_MASK); +} + +static void mvneta_percpu_mask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are masked, but actually only the ones + * mapped to this CPU will be masked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +} + +static void mvneta_percpu_clear_intr_cause(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are cleared, but actually only the ones + * mapped to this CPU will be cleared + */ + mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); +} + /* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. @@ -1053,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp) int max_cpu = num_present_cpus(); /* Clear all Cause registers */ - mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_ENABLE, 0); /* Enable MBUS Retry bit16 */ @@ -2526,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static void mvneta_percpu_unmask_interrupt(void *arg) -{ - struct mvneta_port *pp = arg; - - /* All the queue are unmasked, but actually only the ones - * maped to this CPU will be unmasked - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK_ALL | - MVNETA_TX_INTR_MASK_ALL | - MVNETA_MISCINTR_INTR_MASK); -} - -static void mvneta_percpu_mask_interrupt(void *arg) -{ - struct mvneta_port *pp = arg; - - /* All the queue are masked, but actually only the ones - * maped to this CPU will be masked - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); -} - static void mvneta_start_dev(struct mvneta_port *pp) { - unsigned int cpu; + int cpu; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -2562,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp) mvneta_port_enable(pp); /* Enable polling on the port */ - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_enable(&port->napi); } /* Unmask interrupts. 
It has to be done from each CPU */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, - pp, true); + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2587,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp) phy_stop(pp->phy_dev); - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_disable(&port->napi); @@ -2602,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp) mvneta_port_disable(pp); /* Clear all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvneta_tx_reset(pp); mvneta_rx_reset(pp); @@ -2845,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg) disable_percpu_irq(pp->dev->irq); } +/* Electing a CPU must be done in an atomic way: it should be done + * after or before the removal/insertion of a CPU and this function is + * not reentrant. + */ static void mvneta_percpu_elect(struct mvneta_port *pp) { - int online_cpu_idx, max_cpu, cpu, i = 0; + int elected_cpu = 0, max_cpu, cpu, i = 0; + + /* Use the cpu associated to the rxq when it is online, in all + * the other cases, use the cpu 0 which can't be offline. + */ + if (cpu_online(pp->rxq_def)) + elected_cpu = pp->rxq_def; - online_cpu_idx = pp->rxq_def % num_online_cpus(); max_cpu = num_present_cpus(); for_each_online_cpu(cpu) { @@ -2860,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) if ((rxq % max_cpu) == cpu) rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); - if (i == online_cpu_idx) + if (cpu == elected_cpu) /* Map the default receive queue queue to the * elected CPU */ @@ -2871,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) * the CPU bound to the default RX queue */ if (txq_number == 1) - txq_map = (i == online_cpu_idx) ? + txq_map = (cpu == elected_cpu) ? MVNETA_CPU_TXQ_ACCESS(1) : 0; else txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & @@ -2900,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: + spin_lock(&pp->lock); + /* Configuring the driver for a new CPU while the + * driver is stopping is racy, so just avoid it. 
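The conversion here is one of several in this file that replace a for_each_online_cpu() loop around smp_call_function_single() with a single on_each_cpu() call. on_each_cpu(func, info, wait) runs func on every online CPU, including the caller (locally with interrupts disabled), and with wait set it returns only after every invocation has completed. A toy illustration of those semantics, not driver code:

	#include <linux/atomic.h>
	#include <linux/smp.h>

	static void count_cpu(void *info)
	{
		/* runs once per online CPU, with IRQs off on the local CPU */
		atomic_inc((atomic_t *)info);
	}

	static int count_online_cpus_the_hard_way(void)
	{
		atomic_t n = ATOMIC_INIT(0);

		on_each_cpu(count_cpu, &n, true);	/* wait for all CPUs */
		return atomic_read(&n);
	}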
+ */ + if (pp->is_stopped) { + spin_unlock(&pp->lock); + break; + } netif_tx_stop_all_queues(pp->dev); /* We have to synchronise on tha napi of each CPU @@ -2915,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, } /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); napi_enable(&port->napi); @@ -2932,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, */ mvneta_percpu_elect(pp); - /* Unmask all ethernet port interrupts, as this - * notifier is called for each CPU then the CPU to - * Queue mapping is applied - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE); netif_tx_start_all_queues(pp->dev); + spin_unlock(&pp->lock); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: netif_tx_stop_all_queues(pp->dev); + /* Thanks to this lock we are sure that any pending + * cpu election is done + */ + spin_lock(&pp->lock); /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + spin_unlock(&pp->lock); napi_synchronize(&port->napi); napi_disable(&port->napi); @@ -2966,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, case CPU_DEAD: case CPU_DEAD_FROZEN: /* Check if a new CPU must be elected now this on is down */ + spin_lock(&pp->lock); mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); /* Unmask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2986,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, static int mvneta_open(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int ret, cpu; + int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + @@ -3008,22 +3031,12 @@ static int mvneta_open(struct net_device *dev) goto err_cleanup_txqs; } - /* Even though the documentation says that request_percpu_irq - * doesn't enable the interrupts automatically, it actually - * does so on the local CPU. - * - * Make sure it's disabled. - */ - mvneta_percpu_disable(pp); - /* Enable per-CPU interrupt on all the CPU to handle our RX * queue interrupts */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_enable, - pp, true); - + on_each_cpu(mvneta_percpu_enable, pp, true); + pp->is_stopped = false; /* Register a CPU notifier to handle the case where our CPU * might be taken offline. 
*/ @@ -3055,13 +3068,20 @@ err_cleanup_rxqs: static int mvneta_stop(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int cpu; + /* Inform that we are stopping so we don't want to set up the + * driver for new CPUs in the notifiers + */ + spin_lock(&pp->lock); + pp->is_stopped = true; mvneta_stop_dev(pp); mvneta_mdio_remove(pp); unregister_cpu_notifier(&pp->cpu_notifier); - for_each_present_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); + /* Now that the notifiers are unregistered, we can release the + * lock + */ + spin_unlock(&pp->lock); + on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); @@ -3242,26 +3262,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) const struct mvneta_statistic *s; void __iomem *base = pp->base; u32 high, low, val; + u64 val64; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { - val = 0; - switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); + pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val = (u64)high << 32 | low; + val64 = (u64)high << 32 | low; + pp->ethtool_stats[i] += val64; break; } - - pp->ethtool_stats[i] += val; } } @@ -3311,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp) netif_tx_stop_all_queues(pp->dev); - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, - pp, true); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { @@ -3334,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp) mvreg_write(pp, MVNETA_PORT_CONFIG, val); /* Update the elected CPU matching the new rxq_def */ + spin_lock(&pp->lock); mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { @@ -3605,7 +3624,9 @@ static int mvneta_probe(struct platform_device *pdev) pp->indir[0] = rxq_def; - pp->clk = devm_clk_get(&pdev->dev, NULL); + pp->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pp->clk)) + pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); goto err_put_phy_node; @@ -3613,6 +3634,10 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); + pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pp->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pp->base)) { @@ -3724,6 +3749,7 @@ err_free_stats: err_free_ports: free_percpu(pp->ports); err_clk: + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); err_put_phy_node: of_node_put(phy_node); @@ -3741,6 +3767,7 @@ static int mvneta_remove(struct platform_device *pdev) struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a4beccf1fd46..c797971aefab 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, pe = 
kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) - return -1; + return -ENOMEM; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); pe->index = tid; @@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, if (pmap == 0) { if (add) { kfree(pe); - return -1; + return -EINVAL; } mvpp2_prs_hw_inv(priv, pe->index); priv->prs_shadow[pe->index].valid = false; diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 715de8affcc9..c7e939945259 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) err = mlx4_reset_slave(dev); else err = mlx4_reset_master(dev); - BUG_ON(err != 0); + if (!err) { + mlx4_err(dev, "device was reset successfully\n"); + } else { + /* EEH could have disabled the PCI channel during reset. That's + * recoverable and the PCI error flow will handle it. + */ + if (!pci_channel_offline(dev->persist->pdev)) + BUG_ON(1); + } dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; - mlx4_err(dev, "device was reset successfully\n"); mutex_unlock(&persist->device_state_mutex); /* At that step HW was already reset, now notify clients */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index d48d5793407d..e94ca1c3fc7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2429,7 +2429,7 @@ err_thread: flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); err_slaves: - while (--i) { + while (i--) { for (port = 1; port <= MLX4_MAX_PORTS; port++) kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); } diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 3348e646db70..a849da92f857 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (timestamp_en) cq_context->flags |= cpu_to_be32(1 << 19); - cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); + cq_context->logsize_usrpage = + cpu_to_be32((ilog2(nent) << 24) | + mlx4_to_hw_uar_index(dev, uar->index)); cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 038f9ce391e6..1494997c4f7e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .enable = mlx4_en_phc_enable, }; +#define MLX4_EN_WRAP_AROUND_SEC 10ULL + +/* This function calculates the max shift that enables the user range + * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register. + */ +static u32 freq_to_shift(u16 freq) +{ + u32 freq_khz = freq * 1000; + u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; + u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? 
+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; + /* calculate max possible multiplier in order to fit in 64bit */ + u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); + + /* This comes from the reverse of clocksource_khz2mult */ + return ilog2(div_u64(max_mul * freq_khz, 1000000)); +} + void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; @@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) memset(&mdev->cycles, 0, sizeof(mdev->cycles)); mdev->cycles.read = mlx4_en_read_clock; mdev->cycles.mask = CLOCKSOURCE_MASK(48); - /* Using shift to make calculation more accurate. Since current HW - * clock frequency is 427 MHz, and cycles are given using a 48 bits - * register, the biggest shift when calculating using u64, is 14 - * (max_cycles * multiplier < 2^64) - */ - mdev->cycles.shift = 14; + mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock); mdev->cycles.mult = clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); mdev->nominal_c_mult = mdev->cycles.mult; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0c7e3f69a73b..21e2c0960271 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2245,7 +2245,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) struct mlx4_en_dev *mdev = en_priv->mdev; u64 mac_u64 = mlx4_mac_to_u64(mac); - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) return -EINVAL; return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); @@ -2344,8 +2344,6 @@ out: /* set offloads */ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; - priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) /* unset offloads */ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); - priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; - priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->rss_hash_fn = ETH_RSS_HASH_TOP; } + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + mdev->pndev[port] = dev; mdev->upper[port] = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index ee99e67187f5..3904b5fc0b7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->collisions = 0; stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); - stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_over_errors = 0; stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); stats->rx_frame_errors = 0; stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); - stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + 
stats->rx_missed_errors = 0; stats->tx_aborted_errors = 0; stats->tx_carrier_errors = 0; stats->tx_fifo_errors = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 12aab5a659d3..02e925d6f734 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, } else { context->sq_size_stride = ilog2(TXBB_SIZE) - 4; } - context->usr_page = cpu_to_be32(mdev->priv_uar.index); + context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, + mdev->priv_uar.index)); context->local_qpn = cpu_to_be32(qpn); context->pri_path.ackto = 1 & 0x07; context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4421bf5463f6..e0946ab22010 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); if (ring->bf_alloced) - ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + ring->context.usr_page = + cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, + ring->bf.uar->index)); err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 4696053165f8..f613977455e0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) if (!priv->eq_table.uar_map[index]) { priv->eq_table.uar_map[index] = - ioremap(pci_resource_start(dev->persist->pdev, 2) + - ((eq->eqn / 4) << PAGE_SHIFT), - PAGE_SIZE); + ioremap( + pci_resource_start(dev->persist->pdev, 2) + + ((eq->eqn / 4) << (dev->uar_page_shift)), + (1 << (dev->uar_page_shift))); if (!priv->eq_table.uar_map[index]) { mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", eq->eqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index f1b6d219e445..f8674ae62752 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -168,6 +168,20 @@ struct mlx4_port_config { static atomic_t pf_loading = ATOMIC_INIT(0); +static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + /* The reserved_uars is calculated by system page size unit. + * Therefore, adjustment is added when the uar page size is less + * than the system page size + */ + dev->caps.reserved_uars = + max_t(int, + mlx4_get_num_reserved_uar(dev), + dev_cap->reserved_uars / + (1 << (PAGE_SHIFT - dev->uar_page_shift))); +} + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_mtts = dev_cap->reserved_mtts; dev->caps.reserved_mrws = dev_cap->reserved_mrws; - /* The first 128 UARs are used for EQ doorbells */ - dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
dev_cap->reserved_xrcds : 0; @@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + /* Save uar page shift */ + if (!mlx4_is_slave(dev)) { + /* Virtual PCI function needs to determine UAR page size from + * firmware. Only master PCI function can set the uar page size + */ + dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; + mlx4_set_num_reserved_uars(dev, dev_cap); + } + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { struct mlx4_init_hca_param hca_param; @@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } - /* slave gets uar page size from QUERY_HCA fw command */ - dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); + /* Set uar_page_shift for VF */ + dev->uar_page_shift = hca_param.uar_page_sz + 12; - /* TODO: relax this assumption */ - if (dev->caps.uar_page_size != PAGE_SIZE) { - mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", - dev->caps.uar_page_size, PAGE_SIZE); - return -ENODEV; + /* Make sure the master uar page size is valid */ + if (dev->uar_page_shift > PAGE_SHIFT) { + mlx4_err(dev, + "Invalid configuration: uar page size is larger than system page size\n"); + return -ENODEV; } + /* Set reserved_uars based on the uar_page_shift */ + mlx4_set_num_reserved_uars(dev, &dev_cap); + + /* Although uar page size in FW differs from system page size, + * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) + * still works with assumption that uar page size == system page size + */ + dev->caps.uar_page_size = PAGE_SIZE; + memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { @@ -1226,6 +1256,7 @@ err_set_port: static int mlx4_mf_bond(struct mlx4_dev *dev) { int err = 0; + int nvfs; struct mlx4_slaves_pport slaves_port1; struct mlx4_slaves_pport slaves_port2; DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); @@ -1242,11 +1273,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev) return -EINVAL; } + /* number of virtual functions is number of total functions minus one + * physical function for each port. 
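The nvfs calculation here relies on bitmap_weight(), which simply counts the bits set in a bitmap of a given width; subtracting the two physical functions (one per port) leaves the VF count. A self-contained sketch of the same arithmetic with invented numbers:

	#include <linux/bitmap.h>

	#define FUNCS_PER_PORT 8	/* invented width for the example */

	static int example_count_vfs(void)
	{
		DECLARE_BITMAP(port1, FUNCS_PER_PORT);
		DECLARE_BITMAP(port2, FUNCS_PER_PORT);

		bitmap_zero(port1, FUNCS_PER_PORT);
		bitmap_zero(port2, FUNCS_PER_PORT);
		bitmap_set(port1, 0, 3);	/* PF + 2 VFs active on port 1 */
		bitmap_set(port2, 0, 2);	/* PF + 1 VF active on port 2 */

		/* 3 + 2 - 2 PFs = 3 VFs, mirroring the nvfs formula above */
		return bitmap_weight(port1, FUNCS_PER_PORT) +
		       bitmap_weight(port2, FUNCS_PER_PORT) - 2;
	}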
+ */ + nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + + bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; + /* limit on maximum allowed VFs */ - if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + - bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > - MAX_MF_BOND_ALLOWED_SLAVES) + if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) { + mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", + nvfs, MAX_MF_BOND_ALLOWED_SLAVES); return -EINVAL; + } if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); @@ -2179,8 +2217,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + /* Always set UAR page size 4KB, set log_uar_sz accordingly */ + init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + PAGE_SHIFT - + DEFAULT_UAR_PAGE_SHIFT; + init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + init_hca.mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 609c59dc854e..b3cc3ab63799 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free); int mlx4_init_uar_table(struct mlx4_dev *dev) { - if (dev->caps.num_uars <= 128) { - mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", - dev->caps.num_uars); + int num_reserved_uar = mlx4_get_num_reserved_uar(dev); + + mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift); + mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars); + + if (dev->caps.num_uars <= num_reserved_uar) { + mlx4_err( + dev, "Only %d UAR pages (need more than %d)\n", + dev->caps.num_uars, num_reserved_uar); mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 787b7bb54d52..211c65087997 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { 
mutex_lock(&table->mutex); @@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index b46dbe29ef6c..25ce1b030a00 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, counter_index, RES_COUNTER); - if (!r || r->owner != slave) + if (!r || r->owner != slave) { ret = -EINVAL; - counter = container_of(r, struct res_counter, com); - if (!counter->port) - counter->port = port; + } else { + counter = container_of(r, struct res_counter, com); + if (!counter->port) + counter->port = port; + } spin_unlock_irq(mlx4_tlock(dev)); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index aac071a7e830..5b1753233c5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -223,6 +223,7 @@ struct mlx5e_pport_stats { static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "csum_none", "csum_sw", "lro_packets", @@ -232,16 +233,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_rq_stats { u64 packets; + u64 bytes; u64 csum_none; u64 csum_sw; u64 lro_packets; u64 lro_bytes; u64 wqe_err; -#define NUM_RQ_STATS 6 +#define NUM_RQ_STATS 7 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "tso_packets", "tso_bytes", "csum_offload_none", @@ -253,6 +256,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_sq_stats { u64 packets; + u64 bytes; u64 tso_packets; u64 tso_bytes; u64 csum_offload_none; @@ -260,7 +264,7 @@ struct mlx5e_sq_stats { u64 wake; u64 dropped; u64 nop; -#define NUM_SQ_STATS 8 +#define NUM_SQ_STATS 9 }; struct mlx5e_stats { @@ -304,14 +308,9 @@ enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, }; -enum cq_flags { - MLX5E_CQ_HAS_CQES = 1, -}; - struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; - unsigned long flags; /* data path - accessed per napi poll */ struct napi_struct *napi; @@ -452,6 +451,8 @@ enum mlx5e_traffic_types { MLX5E_NUM_TT, }; +#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY) + enum mlx5e_rqt_ix { MLX5E_INDIRECTION_RQT, MLX5E_SINGLE_RQ_RQT, @@ -618,9 +619,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); 
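mlx5e_build_default_indir_rqt(), declared a few lines below and defined later in this patch, fills the RQT by round-robin over the channels. Shown standalone (same logic as the patch's helper):

	/* Round-robin spread of RQT slots over channels; for example
	 * len = 8 and num_channels = 3 yields 0 1 2 0 1 2 0 1.
	 */
	static void build_default_indir_rqt(u32 *indirection_rqt, int len,
					    int num_channels)
	{
		int i;

		for (i = 0; i < len; i++)
			indirection_rqt[i] = i % num_channels;
	}

The ethtool set_channels path below calls it so that, after a channel-count change, every entry of the indirection table still points at a live channel.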
int mlx5e_close_locked(struct net_device *netdev); +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5e_tx_wqe *wqe, int bf_sz) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index be6543570b2b..2018eebe1531 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, overflow_work); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); } @@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns = timespec64_to_ns(ts); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_init(&tstamp->clock, &tstamp->cycles, ns); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } @@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns; + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); ns = timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); *ts = ns_to_timespec64(ns); @@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_adjtime(&tstamp->clock, delta); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } @@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { u64 adj; u32 diff; + unsigned long flags; int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); @@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); tstamp->cycles.mult = neg_adj ? 
tstamp->nominal_c_mult - diff : tstamp->nominal_c_mult + diff; - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 65624ac65b4c..5abeb00fceb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev, mlx5e_close_locked(dev); priv->params.num_channels = count; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); if (was_opened) err = mlx5e_open_locked(dev); @@ -703,18 +705,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) +{ + struct mlx5_core_dev *mdev = priv->mdev; + void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + int i; + + MLX5_SET(modify_tir_in, in, bitmask.hash, 1); + mlx5e_build_tir_ctx_hash(tirc, priv); + + for (i = 0; i < MLX5E_NUM_TT; i++) + if (IS_HASHING_TT(i)) + mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen); +} + static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mlx5e_priv *priv = netdev_priv(dev); - bool close_open; - int err = 0; + int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + void *in; if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_XOR) && (hfunc != ETH_RSS_HASH_TOP)) return -EINVAL; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + mutex_lock(&priv->state_lock); if (indir) { @@ -723,11 +743,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); } - close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) && - test_bit(MLX5E_STATE_OPENED, &priv->state); - if (close_open) - mlx5e_close_locked(dev); - if (key) memcpy(priv->params.toeplitz_hash_key, key, sizeof(priv->params.toeplitz_hash_key)); @@ -735,12 +750,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, if (hfunc != ETH_RSS_HASH_NO_CHANGE) priv->params.rss_hfunc = hfunc; - if (close_open) - err = mlx5e_open_locked(priv->netdev); + mlx5e_modify_tirs_hash(priv, in, inlen); mutex_unlock(&priv->state_lock); - return err; + kvfree(in); + + return 0; } static int mlx5e_get_rxnfc(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6a3e430f1062..402994bf7e16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -141,6 +141,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) return; /* Collect firts the SW counters and then HW for consistency */ + s->rx_packets = 0; + s->rx_bytes = 0; + s->tx_packets = 0; + s->tx_bytes = 0; s->tso_packets = 0; s->tso_bytes = 0; s->tx_queue_stopped = 0; @@ -155,6 +159,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; s->lro_packets += rq_stats->lro_packets; s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; @@ -164,6 +170,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; + s->tx_packets += sq_stats->packets; + 
s->tx_bytes += sq_stats->bytes; s->tso_packets += sq_stats->tso_packets; s->tso_bytes += sq_stats->tso_bytes; s->tx_queue_stopped += sq_stats->stopped; @@ -225,23 +233,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tx_broadcast_bytes = MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); - s->rx_packets = - s->rx_unicast_packets + - s->rx_multicast_packets + - s->rx_broadcast_packets; - s->rx_bytes = - s->rx_unicast_bytes + - s->rx_multicast_bytes + - s->rx_broadcast_bytes; - s->tx_packets = - s->tx_unicast_packets + - s->tx_multicast_packets + - s->tx_broadcast_packets; - s->tx_bytes = - s->tx_unicast_bytes + - s->tx_multicast_bytes + - s->tx_broadcast_bytes; - /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; s->rx_csum_good = s->rx_packets - s->rx_csum_none - @@ -1199,7 +1190,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = priv->params.indirection_rqt[ix]; - ix = ix % priv->params.num_channels; MLX5_SET(rqtc, rqtc, rq_num[i], test_bit(MLX5E_STATE_OPENED, &priv->state) ? priv->channel[ix]->rq.rqn : @@ -1317,7 +1307,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) lro_timer_supported_periods[2])); } -static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) +{ + MLX5_SET(tirc, tirc, rx_hash_fn, + mlx5e_rx_hash_fn(priv->params.rss_hfunc)); + if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + void *rss_key = MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key); + size_t len = MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, priv->params.toeplitz_hash_key, len); + } +} + +static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1325,6 +1330,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) void *tirc; int inlen; int err; + int tt; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1336,7 +1342,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) mlx5e_build_tir_ctx_lro(tirc, priv); - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + if (err) + break; + } kvfree(in); @@ -1672,17 +1682,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) default: MLX5_SET(tirc, tirc, indirect_table, priv->rqtn[MLX5E_INDIRECTION_RQT]); - MLX5_SET(tirc, tirc, rx_hash_fn, - mlx5e_rx_hash_fn(priv->params.rss_hfunc)); - if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { - void *rss_key = MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key); - size_t len = MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key); - - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, priv->params.toeplitz_hash_key, len); - } + mlx5e_build_tir_ctx_hash(tirc, priv); break; } @@ -1885,8 +1885,10 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_close_locked(priv->netdev); priv->params.lro_en = !!(features & NETIF_F_LRO); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); + err = mlx5e_modify_tirs_lro(priv); + if (err) + mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", + err); if (was_opened) err = mlx5e_open_locked(priv->netdev); @@ -2024,18 +2026,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); 
} -static struct net_device_ops mlx5e_netdev_ops = { +static const struct net_device_ops mlx5e_netdev_ops_basic = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_set_rx_mode = mlx5e_set_rx_mode, + .ndo_set_mac_address = mlx5e_set_mac, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_set_features = mlx5e_set_features, + .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, +}; + +static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, .ndo_set_mac_address = mlx5e_set_mac, - .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, - .ndo_change_mtu = mlx5e_change_mtu, - .ndo_do_ioctl = mlx5e_ioctl, + .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_vf_mac = mlx5e_set_vf_mac, + .ndo_set_vf_vlan = mlx5e_set_vf_vlan, + .ndo_get_vf_config = mlx5e_get_vf_config, + .ndo_set_vf_link_state = mlx5e_set_vf_link_state, + .ndo_get_vf_stats = mlx5e_get_vf_stats, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -2070,12 +2091,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); - int i; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2099,8 +2128,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, netdev_rss_key_fill(priv->params.toeplitz_hash_key, sizeof(priv->params.toeplitz_hash_key)); - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - priv->params.indirection_rqt[i] = i % num_channels; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, num_channels); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; @@ -2137,18 +2166,11 @@ static void mlx5e_build_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (priv->params.num_tc > 1) - mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; - - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac; - mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan; - mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config; - mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state; - mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats; - } + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + netdev->netdev_ops = &mlx5e_netdev_ops_sriov; + else + netdev->netdev_ops = &mlx5e_netdev_ops_basic; - netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; netdev->ethtool_ops = &mlx5e_ethtool_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index dd959d929aad..59658b9d05d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -230,10 +230,6 @@ int 
mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return 0; - for (work_done = 0; work_done < budget; work_done++) { struct mlx5e_rx_wqe *wqe; struct mlx5_cqe64 *cqe; @@ -267,6 +263,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) mlx5e_build_rx_skb(cqe, rq, skb); rq->stats.packets++; + rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); napi_gro_receive(cq->napi, skb); wq_ll_pop: @@ -279,8 +276,5 @@ wq_ll_pop: /* ensure cq space is freed before enabling more cqes */ wmb(); - if (work_done == budget) - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2c3fba0fff54..bb4eeeb007de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -179,6 +179,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) unsigned int skb_len = skb->len; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; + unsigned int num_bytes; bool bf = false; u16 headlen; u16 ds_cnt; @@ -204,8 +205,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) opcode = MLX5_OPCODE_LSO; ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); payload_len = skb->len - ihs; - wi->num_bytes = skb->len + - (skb_shinfo(skb)->gso_segs - 1) * ihs; + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { @@ -213,9 +213,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); - wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } + wi->num_bytes = num_bytes; + if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, &skb_len); @@ -307,6 +309,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->bf_budget = bf ? 
sq->bf_budget - 1 : 0; sq->stats.packets++; + sq->stats.bytes += num_bytes; return NETDEV_TX_OK; dma_unmap_wqe_err: @@ -335,10 +338,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) u16 sqcc; int i; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return false; - sq = container_of(cq, struct mlx5e_sq, cq); npkts = 0; @@ -422,10 +421,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) netif_tx_wake_queue(sq->txq); sq->stats.wake++; } - if (i == MLX5E_TX_CQ_POLL_BUDGET) { - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return true; - } - return false; + return (i == MLX5E_TX_CQ_POLL_BUDGET); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 4ac8d716dbdd..66d51a77609e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); barrier(); napi_schedule(cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c071077aafbd..7992c553c1f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) { int index = q->producer_counter & (q->count - 1); - if ((q->producer_counter - q->consumer_counter) == q->count) + if ((u16) (q->producer_counter - q->consumer_counter) == q->count) return NULL; return mlxsw_pci_queue_elem_info_get(q, index); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index 726f5435b32f..ae65b9940aed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -49,7 +49,7 @@ #define MLXSW_PORT_MID 0xd000 #define MLXSW_PORT_MAX_PHY_PORTS 0x40 -#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS +#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) #define MLXSW_PORT_DEVID_BITS_OFFSET 10 #define MLXSW_PORT_PHY_BITS_OFFSET 4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0c5237264e3e..ffe4c0305733 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, } } +/* SPAFT - Switch Port Acceptable Frame Types + * ------------------------------------------ + * The Switch Port Acceptable Frame Types register configures the frame + * admittance of the port. + */ +#define MLXSW_REG_SPAFT_ID 0x2010 +#define MLXSW_REG_SPAFT_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_spaft = { + .id = MLXSW_REG_SPAFT_ID, + .len = MLXSW_REG_SPAFT_LEN, +}; + +/* reg_spaft_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is not supported (all tag types are allowed). + */ +MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8); + +/* reg_spaft_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8); + +/* reg_spaft_allow_untagged + * When set, untagged frames on the ingress are allowed (default). 
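+ * When cleared, untagged frames are discarded at ingress.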
+ * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1); + +/* reg_spaft_allow_prio_tagged + * When set, priority tagged frames on the ingress are allowed (default). + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1); + +/* reg_spaft_allow_tagged + * When set, tagged frames on the ingress are allowed (default). + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1); + +static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, + bool allow_untagged) +{ + MLXSW_REG_ZERO(spaft, payload); + mlxsw_reg_spaft_local_port_set(payload, local_port); + mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged); + mlxsw_reg_spaft_allow_prio_tagged_set(payload, true); + mlxsw_reg_spaft_allow_tagged_set(payload, true); +} + /* SFGC - Switch Flooding Group Configuration * ------------------------------------------ * The following register controls the association of flooding tables and MIDs @@ -1044,6 +1100,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_port_mask_set(payload, port, 1); } +/* SFDF - Switch Filtering DB Flush + * -------------------------------- + * The switch filtering DB flush register is used to flush the FDB. + * Note that FDB notifications are flushed as well. + */ +#define MLXSW_REG_SFDF_ID 0x2013 +#define MLXSW_REG_SFDF_LEN 0x14 + +static const struct mlxsw_reg_info mlxsw_reg_sfdf = { + .id = MLXSW_REG_SFDF_ID, + .len = MLXSW_REG_SFDF_LEN, +}; + +/* reg_sfdf_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8); + +enum mlxsw_reg_sfdf_flush_type { + MLXSW_REG_SFDF_FLUSH_PER_SWID, + MLXSW_REG_SFDF_FLUSH_PER_FID, + MLXSW_REG_SFDF_FLUSH_PER_PORT, + MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_LAG, + MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, +}; + +/* reg_sfdf_flush_type + * Flush type. + * 0 - All SWID dynamic entries are flushed. + * 1 - All FID dynamic entries are flushed. + * 2 - All dynamic entries pointing to port are flushed. + * 3 - All FID dynamic entries pointing to port are flushed. + * 4 - All dynamic entries pointing to LAG are flushed. + * 5 - All FID dynamic entries pointing to LAG are flushed. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); + +/* reg_sfdf_flush_static + * Static. + * 0 - Flush only dynamic entries. + * 1 - Flush both dynamic and static entries. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1); + +static inline void mlxsw_reg_sfdf_pack(char *payload, + enum mlxsw_reg_sfdf_flush_type type) +{ + MLXSW_REG_ZERO(sfdf, payload); + mlxsw_reg_sfdf_flush_type_set(payload, type); + mlxsw_reg_sfdf_flush_static_set(payload, true); +} + +/* reg_sfdf_fid + * FID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16); + +/* reg_sfdf_system_port + * Port to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16); + +/* reg_sfdf_port_fid_system_port + * Port to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16); + +/* reg_sfdf_lag_id + * LAG ID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10); + +/* reg_sfdf_lag_fid_lag_id + * LAG ID to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10); + /* SLDR - Switch LAG Descriptor Register * ----------------------------------------- * The switch LAG descriptor register is populated by LAG descriptors. 
@@ -1701,20 +1843,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8); * Module number. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false); /* reg_pmlp_tx_lane * Tx Lane. When rxtx field is cleared, this field is used for Rx as well. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false); /* reg_pmlp_rx_lane * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is * equal to Tx lane. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false); static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) { @@ -3117,10 +3259,14 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SPVID"; case MLXSW_REG_SPVM_ID: return "SPVM"; + case MLXSW_REG_SPAFT_ID: + return "SPAFT"; case MLXSW_REG_SFGC_ID: return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SFDF_ID: + return "SFDF"; case MLXSW_REG_SLDR_ID: return "SLDR"; case MLXSW_REG_SLCR_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ce6845d534a8..a94daa8c346c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; +static int +mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); + mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, + mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); + mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + 
return last_err; +} + +static int +__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + return last_err; +} + +static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!list_empty(&mlxsw_sp_port->vports_list)) + if (mlxsw_sp_port->lagged) + return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); + else + return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); + else + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); + else + return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); +} + +static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + + if (mlxsw_sp_vport->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); +} + static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; @@ -2006,10 +2115,16 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, + bool flush_fdb) { struct net_device *dev = mlxsw_sp_port->dev; + if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); + + mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2200,10 +2315,15 @@ err_col_port_enable: return err; } +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev, + bool flush_fdb); + static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port *mlxsw_sp_vport; struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; int err; @@ -2220,7 +2340,30 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; + /* In case we leave a LAG device that has bridges built on top, + * then their teardown sequence is never issued and we need to + * invoke the necessary cleanup routines ourselves. 
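+ * Note that flush_fdb is false in these calls: FDB entries pointing to the LAG are flushed by LAG ID below, when the LAG itself is destroyed.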
+ */ + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + struct net_device *br_dev; + + if (!mlxsw_sp_vport->bridged) + continue; + + br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); + } + + if (mlxsw_sp_port->bridged) { + mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); + mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + } + if (lag->ref_count == 1) { + if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); if (err) return err; @@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev); - static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *vlan_dev) { @@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *br_dev; br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); } mlxsw_sp_vport->dev = mlxsw_sp_port->dev; @@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); + err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, + true); mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); if (err) { netdev_err(dev, "Failed to leave bridge\n"); @@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev) + struct net_device *br_dev, + bool flush_fdb) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); @@ -2604,6 +2746,16 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_vport_flood_set; } + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + netdev_err(dev, "Failed to set STP state\n"); + goto err_port_stp_state_set; + } + + if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) + netdev_err(dev, "Failed to flush FDB\n"); + /* Switch between the vFIDs and destroy the old one if needed. 
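(The old vFID is destroyed once no more vPorts reference it.)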
*/ new_vfid->nr_vports++; mlxsw_sp_vport->vport.vfid = new_vfid; @@ -2618,6 +2770,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, return 0; +err_port_stp_state_set: err_vport_flood_set: err_port_vid_learning_set: err_port_vid_to_fid_validate: @@ -2777,7 +2930,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, if (!mlxsw_sp_vport) return NOTIFY_DONE; err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev); + upper_dev, true); if (err) { netdev_err(dev, "Failed to leave bridge\n"); return NOTIFY_BAD; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a23dc610d259..3b89ed2f3c76 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -120,7 +120,6 @@ struct mlxsw_sp { } fdb_notify; #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */ struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; }; @@ -254,5 +253,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 45479ef5bcf4..7b56098acc58 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -45,6 +45,7 @@ #include <linux/if_bridge.h> #include <linux/workqueue.h> #include <linux/jiffies.h> +#include <linux/rtnetlink.h> #include <net/switchdev.h> #include "spectrum.h" @@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; switch (state) { - case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_FORWARDING: spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; break; - case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_LEARNING: spms_state = MLXSW_REG_SPMS_STATE_LEARNING; break; + case BR_STATE_LISTENING: /* fall-through */ + case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; break; @@ -369,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } -static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char spvid_pl[MLXSW_REG_SPVID_LEN]; @@ -378,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } +static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool allow) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spaft_pl[MLXSW_REG_SPAFT_LEN]; + + mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); +} + +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + if (!vid) { + err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); + if (err) { + 
netdev_err(dev, "Failed to disallow untagged traffic\n"); + return err; + } + } else { + err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + if (err) { + netdev_err(dev, "Failed to set PVID\n"); + return err; + } + + /* Only allow if not already allowed. */ + if (!mlxsw_sp_port->pvid) { + err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, + true); + if (err) { + netdev_err(dev, "Failed to allow untagged traffic\n"); + goto err_port_allow_untagged_set; + } + } + } + + mlxsw_sp_port->pvid = vid; + return 0; + +err_port_allow_untagged_set: + __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); + return err; +} + static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) { char sfmr_pl[MLXSW_REG_SFMR_LEN]; @@ -539,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to add PVID %d\n", vid_begin); goto err_port_pvid_set; } - mlxsw_sp_port->pvid = vid_begin; + } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); + if (err) { + netdev_err(dev, "Unable to del PVID\n"); + goto err_port_pvid_set; + } } /* Changing activity bits only if HW operation succeded */ @@ -891,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return err; } + if (init) + goto out; + pvid = mlxsw_sp_port->pvid; - if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { - /* Default VLAN is always 1 */ - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + if (pvid >= vid_begin && pvid <= vid_end) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); if (err) { netdev_err(dev, "Unable to del PVID %d\n", pvid); return err; } - mlxsw_sp_port->pvid = 1; } - if (init) - goto out; - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false, false); if (err) { @@ -936,6 +988,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, false); } +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) +{ + u16 vid; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); +} + static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) @@ -1040,10 +1100,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + switchdev_obj_dump_cb_t *cb, + struct net_device *orig_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 vport_vid = 0, vport_fid = 0; + struct mlxsw_sp_port *tmp; + u16 vport_fid = 0; char *sfd_pl; char mac[ETH_ALEN]; u16 fid; @@ -1058,13 +1120,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; - mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { u16 tmp; tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); vport_fid = mlxsw_sp_vfid_to_fid(tmp); - vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); } mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); @@ -1088,12 +1148,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, &local_port); if (local_port == mlxsw_sp_port->local_port) { - if (vport_fid && vport_fid != fid) - continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && 
vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -1104,14 +1165,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, mac, &fid, &lag_id); - if (mlxsw_sp_port == - mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { - if (vport_fid && vport_fid != fid) + tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); + if (tmp && tmp->local_port == + mlxsw_sp_port->local_port) { + /* LAG records can only point to LAG + * devices or VLAN devices on top. + */ + if (!netif_is_lag_master(orig_dev) && + !is_vlan_dev(orig_dev)) continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -1124,7 +1193,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); out: - mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); kfree(sfd_pl); return stored_err ? stored_err : err; } @@ -1176,7 +1244,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); + SWITCHDEV_OBJ_PORT_FDB(obj), cb, + obj->orig_dev); break; default: err = -EOPNOTSUPP; @@ -1194,14 +1263,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; -static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, - bool adding, char *mac, u16 vid, +static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding, + char *mac, u16 vid, struct net_device *dev) { struct switchdev_notifier_fdb_info info; unsigned long notifier_type; - if (learning && learning_sync) { + if (learning_sync) { info.addr = mac; info.vid = vid; notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; @@ -1237,7 +1306,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; } - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + vid = 0; /* Override the physical port with the vPort. */ mlxsw_sp_port = mlxsw_sp_vport; } else { @@ -1257,8 +1326,7 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, vid, mlxsw_sp_port->dev); return; @@ -1273,6 +1341,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, bool adding) { struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *dev; char mac[ETH_ALEN]; u16 lag_vid = 0; u16 lag_id; @@ -1298,11 +1367,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - lag_vid = vid; + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + dev = mlxsw_sp_vport->dev; + vid = 0; /* Override the physical port with the vPort. 
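(The event is reported against the VLAN device itself, hence vid 0.)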
*/ mlxsw_sp_port = mlxsw_sp_vport; } else { + dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev; vid = fid; } @@ -1319,10 +1390,8 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, - adding, mac, vid, - mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev); + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, + vid, dev); return; just_remove: @@ -1374,7 +1443,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); - mutex_lock(&mlxsw_sp->fdb_lock); + rtnl_lock(); do { mlxsw_reg_sfn_pack(sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); @@ -1387,7 +1456,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); } while (num_rec); - mutex_unlock(&mlxsw_sp->fdb_lock); + rtnl_unlock(); kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -1402,7 +1471,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); return err; } - mutex_init(&mlxsw_sp->fdb_lock); INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index a10c928bbd6b..3e67f451f2ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -28,6 +28,16 @@ #include "moxart_ether.h" +static inline void moxart_desc_write(u32 data, u32 *desc) +{ + *desc = cpu_to_le32(data); +} + +static inline u32 moxart_desc_read(u32 *desc) +{ + return le32_to_cpu(*desc); +} + static inline void moxart_emac_write(struct net_device *ndev, unsigned int reg, unsigned long value) { @@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev) static void moxart_mac_setup_desc_ring(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; int i; for (i = 0; i < TX_DESC_NUM; i++) { @@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; } - writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); + moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); priv->tx_head = 0; priv->tx_tail = 0; @@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) for (i = 0; i < RX_DESC_NUM; i++) { desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; memset(desc, 0, RX_REG_DESC_SIZE); - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); - writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, desc + RX_REG_OFFSET_DESC1); priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; @@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) netdev_err(ndev, "DMA mapping error\n"); - writel(priv->rx_mapping[i], + moxart_desc_write(priv->rx_mapping[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); - writel(priv->rx_buf[i], + moxart_desc_write((uintptr_t)priv->rx_buf[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); } - writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); + 
moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); priv->rx_head = 0; @@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) napi); struct net_device *ndev = priv->ndev; struct sk_buff *skb; - void __iomem *desc; + void *desc; unsigned int desc0, len; int rx_head = priv->rx_head; int rx = 0; while (rx < budget) { desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); - desc0 = readl(desc + RX_REG_OFFSET_DESC0); + desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0); + rmb(); /* ensure desc0 is up to date */ if (desc0 & RX_DESC0_DMA_OWN) break; @@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) priv->stats.multicast++; rx_next: - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + wmb(); /* prevent setting ownership back too early */ + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); rx_head = RX_NEXT(rx_head); priv->rx_head = rx_head; @@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; unsigned int len; unsigned int tx_head = priv->tx_head; u32 txdes1; @@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); spin_lock_irq(&priv->txlock); - if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { + if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; goto out_unlock; } + rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */ len = skb->len > TX_BUF_SIZE ? 
TX_BUF_SIZE : skb->len; @@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_len[tx_head] = len; priv->tx_skb[tx_head] = skb; - writel(priv->tx_mapping[tx_head], + moxart_desc_write(priv->tx_mapping[tx_head], desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); - writel(skb->data, + moxart_desc_write((uintptr_t)skb->data, desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); if (skb->len < ETH_ZLEN) { @@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); if (tx_head == TX_DESC_NUM_MASK) txdes1 |= TX_DESC1_END; - writel(txdes1, desc + TX_REG_OFFSET_DESC1); - writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); + moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1); + wmb(); /* flush descriptor before transferring ownership */ + moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); /* start to send packet */ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); @@ -460,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; priv->base = devm_ioremap_resource(p_dev, res); - ret = IS_ERR(priv->base); - if (ret) { + if (IS_ERR(priv->base)) { dev_err(p_dev, "devm_ioremap_resource failed\n"); + ret = PTR_ERR(priv->base); goto init_fail; } diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 2be9280d608c..93a9563ac7c6 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -300,7 +300,7 @@ struct moxart_mac_priv_t { dma_addr_t rx_base; dma_addr_t rx_mapping[RX_DESC_NUM]; - void __iomem *rx_desc_base; + void *rx_desc_base; unsigned char *rx_buf_base; unsigned char *rx_buf[RX_DESC_NUM]; unsigned int rx_head; @@ -308,7 +308,7 @@ struct moxart_mac_priv_t { dma_addr_t tx_base; dma_addr_t tx_mapping[TX_DESC_NUM]; - void __iomem *tx_desc_base; + void *tx_desc_base; unsigned char *tx_buf_base; unsigned char *tx_buf[RX_DESC_NUM]; unsigned int tx_head; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50d5604833ed..e0993eba5df3 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) return IRQ_NONE; } -#ifdef CONFIG_PCI_MSI - static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; @@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) if (vdev->config.intr_type == MSI_X) pci_disable_msix(vdev->pdev); } -#endif static void vxge_rem_isr(struct vxgedev *vdev) { -#ifdef CONFIG_PCI_MSI - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && + vdev->config.intr_type == MSI_X) { vxge_rem_msix_isr(vdev); - } else -#endif - if (vdev->config.intr_type == INTA) { + } else if (vdev->config.intr_type == INTA) { synchronize_irq(vdev->pdev->irq); free_irq(vdev->pdev->irq, vdev); } @@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev) static int vxge_add_isr(struct vxgedev *vdev) { int ret = 0; -#ifdef CONFIG_PCI_MSI int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; int pci_fun = PCI_FUNC(vdev->pdev->devfn); - if (vdev->config.intr_type == MSI_X) + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) ret 
= vxge_enable_msix(vdev); if (ret) { @@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev) vdev->config.intr_type = INTA; } - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) { for (intr_idx = 0; intr_idx < (vdev->no_of_vpath * VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { @@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev) vdev->vxge_entries[intr_cnt].in_use = 1; vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; } -INTA_MODE: -#endif +INTA_MODE: if (vdev->config.intr_type == INTA) { snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge:INTA", vdev->ndev->name); @@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config, if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; -#ifndef CONFIG_PCI_MSI - vxge_debug_init(VXGE_ERR, - "%s: This Kernel does not support " - "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); - *intr_type = INTA; -#endif + if (!IS_ENABLED(CONFIG_PCI_MSI)) { + vxge_debug_init(VXGE_ERR, + "%s: This Kernel does not support " + "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); + *intr_type = INTA; + } /* Configure whether MSI-X or IRQL. */ switch (*intr_type) { diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 689a4a5c8dcf..1ef03939d25f 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev) dev->netdev_ops = &qcaspi_netdev_ops; qcaspi_set_ethtool_ops(dev); dev->watchdog_timeo = QCASPI_TX_TIMEOUT; - dev->flags = IFF_MULTICAST; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->tx_queue_len = 100; qca = netdev_priv(dev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 17d5571d0432..dd2cf3738b73 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: @@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: @@ -6137,28 +6133,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) sw_cnt_1ms_ini = 16000000/rg_saw_cnt; sw_cnt_1ms_ini &= 0x0fff; data = r8168_mac_ocp_read(tp, 0xd412); - data &= 0x0fff; + data &= ~0x0fff; data |= sw_cnt_1ms_ini; r8168_mac_ocp_write(tp, 0xd412, data); } data = r8168_mac_ocp_read(tp, 0xe056); - data &= 0xf0; - data |= 0x07; + data &= ~0xf0; + data |= 0x70; r8168_mac_ocp_write(tp, 0xe056, data); data = r8168_mac_ocp_read(tp, 0xe052); - data &= 0x8008; - data |= 0x6000; + data &= ~0x6000; + data |= 0x8008; r8168_mac_ocp_write(tp, 0xe052, data); data = r8168_mac_ocp_read(tp, 0xe0d6); - data &= 0x01ff; + data &= ~0x01ff; data |= 0x017f; r8168_mac_ocp_write(tp, 0xe0d6, data); data = r8168_mac_ocp_read(tp, 0xd420); - data &= 0x0fff; + data &= ~0x0fff; data |= 0x047f; r8168_mac_ocp_write(tp, 0xd420, data); @@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct 
rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; struct rtl8169_counters *counters = tp->counters; unsigned int start; - if (netif_running(dev)) + pm_runtime_get_noresume(&pdev->dev); + + if (netif_running(dev) && pm_runtime_active(&pdev->dev)) rtl8169_rx_missed(dev, ioaddr); do { @@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) * Fetch additonal counter values missing in stats collected by driver * from tally counters. */ - rtl8169_update_counters(dev); + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(dev); /* * Subtract values fetched during initalization. @@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - le16_to_cpu(tp->tc_offset.tx_aborted); + pm_runtime_put_noidle(&pdev->dev); + return stats; } @@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device) rtl8169_net_suspend(dev); + /* Update counters before going runtime suspend */ + rtl8169_rx_missed(dev, tp->mmio_addr); + rtl8169_update_counters(dev); + return 0; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ac43ed914fcf..86449c357168 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); if (error) { @@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); } @@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Wait for DMA stopping */ ravb_stop_dma(ndev); @@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); } @@ -1718,7 +1722,6 @@ static int ravb_set_gti(struct net_device *ndev) static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct ravb_private *priv; enum ravb_chip_id chip_id; struct net_device *ndev; @@ -1750,8 +1753,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->base_addr = res->start; ndev->dma = -1; - match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev); - chip_id = (enum ravb_chip_id)match->data; + chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); if (chip_id == RCAR_GEN3) irq = platform_get_irq_byname(pdev, "ch22"); @@ -1814,10 +1816,6 @@ static int ravb_probe(struct platform_device *pdev) CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); } - /* Set CSEL value */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, - CCC); - /* Set GTI value */ error = ravb_set_gti(ndev); if (error) diff --git 
a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index dfa9e59c9442..738449992876 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3061,15 +3061,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->ether_link_active_low = pd->ether_link_active_low; /* set cpu data */ - if (id) { + if (id) mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; - } else { - const struct of_device_id *match; + else + mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); - match = of_match_device(of_match_ptr(sh_eth_match_table), - &pdev->dev); - mdp->cd = (struct sh_eth_cpu_data *)match->data; - } mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); if (!mdp->reg_offset) { dev_err(&pdev->dev, "Unknown register type (%d)\n", diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a4ab71d43e4e..166a7fc87e2f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work) info.addr = lw->addr; info.vid = lw->vid; + rtnl_lock(); if (learned && removing) call_switchdev_notifiers(SWITCHDEV_FDB_DEL, lw->rocker_port->dev, &info.info); else if (learned && !removing) call_switchdev_notifiers(SWITCHDEV_FDB_ADD, lw->rocker_port->dev, &info.info); + rtnl_unlock(); rocker_port_kfree(lw->trans, work); } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0e2fc1a844ab..db7db8ac4ca3 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev) } ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { - ret = -ENODEV; + if (ndev->irq < 0) { + ret = ndev->irq; goto out_release_io; } /* diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0faf16336035..efb54f356a67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev) struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; - struct device_node *mdio_node = NULL; - struct device_node *child_node = NULL; + struct device_node *mdio_node = priv->plat->mdio_node; if (!mdio_bus_data) return 0; if (IS_ENABLED(CONFIG_OF)) { - for_each_child_of_node(priv->device->of_node, child_node) { - if (of_device_is_compatible(child_node, - "snps,dwmac-mdio")) { - mdio_node = child_node; - break; - } - } - if (mdio_node) { netdev_dbg(ndev, "FOUND MDIO subnode\n"); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6a52fa18cbf2..4514ba73d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -110,6 +110,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + struct device_node *child_node = NULL; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -140,13 +141,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->phy_node = of_node_get(np); } + 
for_each_child_of_node(np, child_node) + if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) { + plat->mdio_node = child_node; + break; + } + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) + if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index e23a642357e7..2437227712dc 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -51,7 +51,6 @@ #endif #ifdef CONFIG_PPC_PMAC -#include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index cc106d892e29..23fa29877f5b 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -389,17 +389,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) if (vio_version_after_eq(&port->vio, 1, 8)) { struct vio_net_dext *dext = vio_net_ext(desc); + skb_reset_network_header(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { if (skb->protocol == ETH_P_IP) { - struct iphdr *iph = (struct iphdr *)skb->data; + struct iphdr *iph = ip_hdr(skb); iph->check = 0; ip_send_check(iph); } } if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && - skb->ip_summed == CHECKSUM_NONE) - vnet_fullcsum(skb); + skb->ip_summed == CHECKSUM_NONE) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; + + skb_reset_transport_header(skb); + skb_set_transport_header(skb, ihl); + vnet_fullcsum(skb); + } + } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_level = 0; diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 70814b7386b3..af11ed1e0bcc 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -426,7 +426,7 @@ #define DWC_MMC_RXOCTETCOUNT_GB 0x0784 #define DWC_MMC_RXPACKETCOUNT_GB 0x0780 -static int debug = 3; +static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); @@ -650,6 +650,11 @@ struct net_local { u32 mmc_tx_counters_mask; struct dwceqos_flowcontrol flowcontrol; + + /* Tracks the intermediate state of phy started but hardware + * init not finished yet. 
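+ * While set, dwceqos_adjust_link() returns early; dwceqos_init_hw() clears the flag and replays the link state.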
+ */ + bool phy_defer; }; static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, @@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev) struct phy_device *phydev = lp->phy_dev; int status_change = 0; + if (lp->phy_defer) + return; + if (phydev->link) { if ((lp->speed != phydev->speed) || (lp->duplex != phydev->duplex)) { @@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) /* Allocate DMA descriptors */ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, 0); + &lp->rx_descs_addr, GFP_KERNEL); if (!lp->rx_descs) goto err_out; lp->rx_descs_tail_addr = lp->rx_descs_addr + @@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, 0); + &lp->tx_descs_addr, GFP_KERNEL); if (!lp->tx_descs) goto err_out; lp->tx_descs_tail_addr = lp->tx_descs_addr + @@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp) regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); + + lp->phy_defer = false; + mutex_lock(&lp->phy_dev->lock); + phy_read_status(lp->phy_dev); + dwceqos_adjust_link(lp->ndev); + mutex_unlock(&lp->phy_dev->lock); } static void dwceqos_tx_reclaim(unsigned long data) @@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev) } netdev_reset_queue(ndev); - napi_enable(&lp->napi); + /* The dwceqos reset state machine requires all phy clocks to complete, + * hence the unusual init order with phy_start first. + */ + lp->phy_defer = true; phy_start(lp->phy_dev); dwceqos_init_hw(lp); + napi_enable(&lp->napi); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); @@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); - phy_stop(lp->phy_dev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - netif_stop_queue(ndev); napi_disable(&lp->napi); - dwceqos_drain_dma(lp); + /* Stop all tx before we drain the tx dma. */ + netif_tx_lock_bh(lp->ndev); + netif_stop_queue(ndev); + netif_tx_unlock_bh(lp->ndev); - netif_tx_lock(lp->ndev); + dwceqos_drain_dma(lp); dwceqos_reset_hw(lp); + phy_stop(lp->phy_dev); + dwceqos_descriptor_free(lp); - netif_tx_unlock(lp->ndev); return 0; } @@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) ((trans.initial_descriptor + trans.nr_descriptors) % DWCEQOS_TX_DCNT)); - dwceqos_tx_finalize(skb, lp, &trans); - - netdev_sent_queue(ndev, skb->len); - spin_lock_bh(&lp->tx_lock); lp->tx_free -= trans.nr_descriptors; + dwceqos_tx_finalize(skb, lp, &trans); + netdev_sent_queue(ndev, skb->len); spin_unlock_bh(&lp->tx_lock); ndev->trans_start = jiffies; diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index e9cc61e1ec74..c3e85acfdc70 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, mode = AM33XX_GMII_SEL_MODE_RGMII; break; - case PHY_INTERFACE_MODE_MII: default: + dev_warn(priv->dev, + "Unsupported PHY mode: \"%s\". 
Defaulting to MII.\n", + phy_modes(phy_mode)); + /* fallthrough */ + case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; }; @@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, mode = AM33XX_GMII_SEL_MODE_RGMII; break; - case PHY_INTERFACE_MODE_MII: default: + dev_warn(priv->dev, + "Unsupported PHY mode: \"%s\". Defaulting to MII.\n", + phy_modes(phy_mode)); + /* fallthrough */ + case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; }; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 657b65bf5cac..18bf3a8fdc50 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -82,7 +82,7 @@ struct cpdma_desc { struct cpdma_desc_pool { phys_addr_t phys; - u32 hw_addr; + dma_addr_t hw_addr; void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; @@ -152,7 +152,7 @@ struct cpdma_chan { * abstract out these details */ static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, +cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, int size, int align) { int bitmap_size; @@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, if (phys) { pool->phys = phys; - pool->iomap = ioremap(phys, size); + pool->iomap = ioremap(phys, size); /* should be memremap? */ pool->hw_addr = hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, + pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, GFP_KERNEL); - pool->iomap = pool->cpumap; - pool->hw_addr = pool->phys; + pool->iomap = (void __iomem __force *)pool->cpumap; + pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } if (pool->iomap) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c61d66d38634..029841f98c32 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, *ndesc = le32_to_cpu(desc->next_desc); } -static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) +static u32 get_sw_data(int index, struct knav_dma_desc *desc) { - *pad0 = le32_to_cpu(desc->pad[0]); - *pad1 = le32_to_cpu(desc->pad[1]); - *pad2 = le32_to_cpu(desc->pad[2]); + /* No Endian conversion needed as this data is untouched by hw */ + return desc->sw_data[index]; } -static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) -{ - u64 pad64; - - pad64 = le32_to_cpu(desc->pad[0]) + - ((u64)le32_to_cpu(desc->pad[1]) << 32); - *padptr = (void *)(uintptr_t)pad64; -} +/* use these macros to get sw data */ +#define GET_SW_DATA0(desc) get_sw_data(0, desc) +#define GET_SW_DATA1(desc) get_sw_data(1, desc) +#define GET_SW_DATA2(desc) get_sw_data(2, desc) +#define GET_SW_DATA3(desc) get_sw_data(3, desc) static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, struct knav_dma_desc *desc) @@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info, desc->packet_info = cpu_to_le32(pkt_info); } -static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) +static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc) { - desc->pad[0] = cpu_to_le32(pad0); - desc->pad[1] = cpu_to_le32(pad1); - desc->pad[2] = cpu_to_le32(pad1); + /* No Endian conversion needed as this data is untouched by hw */ + 
desc->sw_data[index] = data; } +/* use these macros to set sw data */ +#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc) +#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc) +#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc) +#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc) + static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, struct knav_dma_desc *desc) { @@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, dma_addr_t dma_desc, dma_buf; unsigned int buf_len, dma_sz = sizeof(*ndesc); void *buf_ptr; - u32 pad[2]; u32 tmp; get_words(&dma_desc, 1, &desc->next_desc); @@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, break; } get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); - get_pad_ptr(&buf_ptr, ndesc); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + buf_ptr = (void *)GET_SW_DATA0(ndesc); + buf_len = (int)GET_SW_DATA1(desc); dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); __free_page(buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); } - - get_pad_info(&pad[0], &pad[1], &buf_len, desc); - buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + buf_ptr = (void *)GET_SW_DATA0(desc); + buf_len = (int)GET_SW_DATA1(desc); if (buf_ptr) netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); @@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dma_addr_t dma_desc, dma_buff; struct netcp_packet p_info; struct sk_buff *skb; - u32 pad[2]; void *org_buf_ptr; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); @@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); - get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); - org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + org_buf_ptr = (void *)GET_SW_DATA0(desc); + org_buf_len = (int)GET_SW_DATA1(desc); if (unlikely(!org_buf_ptr)) { dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); @@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) /* Fill in the page fragment list */ while (dma_desc) { struct page *page; - void *ptr; ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) { @@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); - get_pad_ptr(&ptr, ndesc); - page = ptr; + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + page = (struct page *)GET_SW_DATA0(desc); if (likely(dma_buff && buf_len && page)) { dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, @@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) } get_org_pkt_info(&dma, &buf_len, desc); - get_pad_ptr(&buf_ptr, desc); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. 
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
 	*ndesc = le32_to_cpu(desc->next_desc);
 }
 
-static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
+static u32 get_sw_data(int index, struct knav_dma_desc *desc)
 {
-	*pad0 = le32_to_cpu(desc->pad[0]);
-	*pad1 = le32_to_cpu(desc->pad[1]);
-	*pad2 = le32_to_cpu(desc->pad[2]);
+	/* No Endian conversion needed as this data is untouched by hw */
+	return desc->sw_data[index];
 }
 
-static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
-{
-	u64 pad64;
-
-	pad64 = le32_to_cpu(desc->pad[0]) +
-		((u64)le32_to_cpu(desc->pad[1]) << 32);
-	*padptr = (void *)(uintptr_t)pad64;
-}
+/* use these macros to get sw data */
+#define GET_SW_DATA0(desc) get_sw_data(0, desc)
+#define GET_SW_DATA1(desc) get_sw_data(1, desc)
+#define GET_SW_DATA2(desc) get_sw_data(2, desc)
+#define GET_SW_DATA3(desc) get_sw_data(3, desc)
 
 static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
 			     struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
 	desc->packet_info = cpu_to_le32(pkt_info);
 }
 
-static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
+static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
 {
-	desc->pad[0] = cpu_to_le32(pad0);
-	desc->pad[1] = cpu_to_le32(pad1);
-	desc->pad[2] = cpu_to_le32(pad1);
+	/* No Endian conversion needed as this data is untouched by hw */
+	desc->sw_data[index] = data;
 }
 
+/* use these macros to set sw data */
+#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
+#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
+#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
+#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
+
 static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
 			     struct knav_dma_desc *desc)
 {
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 	dma_addr_t dma_desc, dma_buf;
 	unsigned int buf_len, dma_sz = sizeof(*ndesc);
 	void *buf_ptr;
-	u32 pad[2];
 	u32 tmp;
 
 	get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 			break;
 		}
 		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
-		get_pad_ptr(&buf_ptr, ndesc);
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		buf_ptr = (void *)GET_SW_DATA0(ndesc);
+		buf_len = (int)GET_SW_DATA1(desc);
 		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(buf_ptr);
 
 		knav_pool_desc_put(netcp->rx_pool, desc);
 	}
-
-	get_pad_info(&pad[0], &pad[1], &buf_len, desc);
-	buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+	/* warning!!!! We are retrieving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	buf_ptr = (void *)GET_SW_DATA0(desc);
+	buf_len = (int)GET_SW_DATA1(desc);
 
 	if (buf_ptr)
 		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	dma_addr_t dma_desc, dma_buff;
 	struct netcp_packet p_info;
 	struct sk_buff *skb;
-	u32 pad[2];
 	void *org_buf_ptr;
 
 	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	}
 
 	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
-	get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
-	org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+	/* warning!!!! We are retrieving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	org_buf_ptr = (void *)GET_SW_DATA0(desc);
+	org_buf_len = (int)GET_SW_DATA1(desc);
 
 	if (unlikely(!org_buf_ptr)) {
 		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	/* Fill in the page fragment list */
 	while (dma_desc) {
 		struct page *page;
-		void *ptr;
 
 		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
 		if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 		}
 
 		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
-		get_pad_ptr(&ptr, ndesc);
-		page = ptr;
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		page = (struct page *)GET_SW_DATA0(desc);
 
 		if (likely(dma_buff && buf_len && page)) {
 			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 
 		get_org_pkt_info(&dma, &buf_len, desc);
-		get_pad_ptr(&buf_ptr, desc);
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		buf_ptr = (void *)GET_SW_DATA0(desc);
 
 		if (unlikely(!dma)) {
 			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	struct page *page;
 	dma_addr_t dma;
 	void *bufptr;
-	u32 pad[3];
+	u32 sw_data[2];
 
 	/* Allocate descriptor */
 	hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 		bufptr = netdev_alloc_frag(primary_buf_len);
-		pad[2] = primary_buf_len;
+		sw_data[1] = primary_buf_len;
 
 		if (unlikely(!bufptr)) {
 			dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		if (unlikely(dma_mapping_error(netcp->dev, dma)))
 			goto fail;
 
-		pad[0] = lower_32_bits((uintptr_t)bufptr);
-		pad[1] = upper_32_bits((uintptr_t)bufptr);
-
+		/* warning!!!! We are saving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		sw_data[0] = (u32)bufptr;
 	} else {
 		/* Allocate a secondary receive queue entry */
 		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 		buf_len = PAGE_SIZE;
 		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
-		pad[0] = lower_32_bits(dma);
-		pad[1] = upper_32_bits(dma);
-		pad[2] = 0;
+		/* warning!!!! We are saving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		sw_data[0] = (u32)page;
+		sw_data[1] = 0;
 	}
 
 	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
 		    KNAV_DMA_DESC_RETQ_SHIFT;
 	set_org_pkt_info(dma, buf_len, hwdesc);
-	set_pad_info(pad[0], pad[1], pad[2], hwdesc);
+	SET_SW_DATA0(sw_data[0], hwdesc);
+	SET_SW_DATA1(sw_data[1], hwdesc);
 	set_desc_info(desc_info, pkt_info, hwdesc);
 
 	/* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 					  unsigned int budget)
 {
 	struct knav_dma_desc *desc;
-	void *ptr;
 	struct sk_buff *skb;
 	unsigned int dma_sz;
 	dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 			continue;
 		}
 
-		get_pad_ptr(&ptr, desc);
-		skb = ptr;
+		/* warning!!!! We are retrieving the virtual ptr in the sw_data
+		 * field as a 32bit value. Will not work on 64bit machines
+		 */
+		skb = (struct sk_buff *)GET_SW_DATA0(desc);
 		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
 		if (!skb) {
 			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
 	}
 
 	set_words(&tmp, 1, &desc->packet_info);
-	tmp = lower_32_bits((uintptr_t)&skb);
-	set_words(&tmp, 1, &desc->pad[0]);
-	tmp = upper_32_bits((uintptr_t)&skb);
-	set_words(&tmp, 1, &desc->pad[1]);
+	/* warning!!!! We are saving the virtual ptr in the sw_data
+	 * field as a 32bit value. Will not work on 64bit machines
+	 */
+	SET_SW_DATA0((u32)skb, desc);
 
 	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
 		tmp = tx_pipe->switch_to_port;
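The netcp rework above replaces the pad[] helpers with sw_data accessors: those descriptor words are software-only scratch that the hardware never reads, which is why the new helpers skip the endian conversion, and the repeated warning comments flag that storing a virtual pointer in a single u32 word only round-trips on 32-bit kernels. A self-contained sketch of that stash/fetch pattern follows; demo_desc and the helper names are hypothetical, not the driver's knav_dma_desc API.

#include <stdint.h>

typedef uint32_t u32;

struct demo_desc {
	u32 sw_data[4];	/* scratch words the hardware never interprets */
};

/* No endian conversion: only the CPU ever reads these words back. */
static void stash_ptr(struct demo_desc *d, void *ptr)
{
	/* assumption: a 32-bit build, so the narrowing cast is lossless;
	 * on 64-bit it truncates, exactly what the patch comments warn about
	 */
	d->sw_data[0] = (u32)(uintptr_t)ptr;
}

static void *fetch_ptr(const struct demo_desc *d)
{
	return (void *)(uintptr_t)d->sw_data[0];
}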
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3c54a2cae5df..67610270d171 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -48,7 +48,6 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
-#include <asm/pci-bridge.h>
 #include <net/checksum.h>
 
 #include "spider_net.h"