Diffstat (limited to 'drivers/net/ethernet')
333 files changed, 34114 insertions, 16586 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 79e1a0282163..d81fceddbe0e 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1601,15 +1601,9 @@ vortex_up(struct net_device *dev) dev->name, media_tbl[dev->if_port].name); } - init_timer(&vp->timer); - vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); - vp->timer.data = (unsigned long)dev; - vp->timer.function = vortex_timer; /* timer handler */ - add_timer(&vp->timer); - - init_timer(&vp->rx_oom_timer); - vp->rx_oom_timer.data = (unsigned long)dev; - vp->rx_oom_timer.function = rx_oom_timer; + setup_timer(&vp->timer, vortex_timer, (unsigned long)dev); + mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); + setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev); if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", @@ -2461,7 +2455,7 @@ boomerang_interrupt(int irq, void *dev_id) int i; pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].frag[0].addr), - le32_to_cpu(vp->tx_ring[entry].frag[0].length), + le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, PCI_DMA_TODEVICE); for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 2777289a26c0..2f79d29f17f2 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), + PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009), PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 0b13af8e4070..2ffd63463299 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -106,6 +106,7 @@ config LANTIQ_ETOP Support for the MII0 inside the Lantiq SoC source "drivers/net/ethernet/marvell/Kconfig" +source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" @@ -138,7 +139,6 @@ config NET_NETX source "drivers/net/ethernet/nuvoton/Kconfig" source "drivers/net/ethernet/nvidia/Kconfig" source "drivers/net/ethernet/nxp/Kconfig" -source "drivers/net/ethernet/octeon/Kconfig" source "drivers/net/ethernet/oki-semi/Kconfig" config ETHOC diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 38dc1a776a2b..1d349e9aa9a6 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ +obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ @@ -58,7 +59,6 @@ obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ obj-$(CONFIG_LPC_ENET) += nxp/ -obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ 
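Note on the 3c59x hunk above: it replaces the open-coded init_timer()/add_timer() sequences with setup_timer() plus mod_timer(). A minimal sketch of that pattern, using the same pre-4.15 timer API; the my_priv/my_timer_fn names are hypothetical, not the driver's own:

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical driver-private state; 3c59x keeps this in struct vortex_private. */
struct my_priv {
        struct timer_list timer;
};

static void my_timer_fn(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;

        netdev_dbg(dev, "periodic media check\n");
}

static void my_start_timer(struct my_priv *priv, struct net_device *dev)
{
        /* replaces init_timer() + manual .data/.function/.expires + add_timer() */
        setup_timer(&priv->timer, my_timer_fn, (unsigned long)dev);
        mod_timer(&priv->timer, jiffies + HZ);
}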
obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 3f3bcbea15bd..0907ab6ff309 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) sizeof(u32), &tx_ring->tx_status_pa, GFP_KERNEL); - if (!tx_ring->tx_status_pa) { + if (!tx_ring->tx_status) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx status block\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 17472851674f..f749e4d389eb 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev) priv->mdio->id); mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); priv->mdio = NULL; } diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 87e727b921dc..fcdf5dda448f 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -50,8 +50,8 @@ static const char version[] = static void write_rreg(u_long base, u_int reg, u_int val) { asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #-4] @ NET_RDP" + "strh %1, [%2] @ NET_RAP\n\t" + "strh %0, [%2, #-4] @ NET_RDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } @@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg) { unsigned short v; asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "ldr%?h %0, [%2, #-4] @ NET_RDP" + "strh %1, [%2] @ NET_RAP\n\t" + "ldrh %0, [%2, #-4] @ NET_RDP" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; @@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg) static inline void write_ireg(u_long base, u_int reg, u_int val) { asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #8] @ NET_IDP" + "strh %1, [%2] @ NET_RAP\n\t" + "strh %0, [%2, #8] @ NET_IDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } @@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg) { u_short v; asm volatile( - "str%?h %1, [%2] @ NAT_RAP\n\t" - "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" + "strh %1, [%2] @ NAT_RAP\n\t" + "ldrh %0, [%2, #8] @ NET_IDP\n\t" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; @@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne offset = ISAMEM_BASE + (offset << 1); length = (length + 1) & ~1; if ((int)buf & 2) { - asm volatile("str%?h %2, [%0], #4" + asm volatile("strh %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; @@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"); asm volatile( - "ldm%?ia %0!, {%1, %2}" + "ldmia %0!, {%1, %2}" : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); length -= 8; asm volatile( - "str%?h %1, [%0], #4\n\t" - "mov%? %1, %1, lsr #16\n\t" - "str%?h %1, [%0], #4\n\t" - "str%?h %2, [%0], #4\n\t" - "mov%? 
%2, %2, lsr #16\n\t" - "str%?h %2, [%0], #4" + "strh %1, [%0], #4\n\t" + "mov %1, %1, lsr #16\n\t" + "strh %1, [%0], #4\n\t" + "strh %2, [%0], #4\n\t" + "mov %2, %2, lsr #16\n\t" + "strh %2, [%0], #4" : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); } while (length > 0) { - asm volatile("str%?h %2, [%0], #4" + asm volatile("strh %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; @@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned if ((int)buf & 2) { unsigned int tmp; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? %2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" + "ldrh %2, [%0], #4\n\t" + "strb %2, [%1], #1\n\t" + "mov %2, %2, lsr #8\n\t" + "strb %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); length -= 2; } while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "ldr%?h %4, [%0], #4\n\t" - "ldr%?h %3, [%0], #4\n\t" - "orr%? %2, %2, %4, lsl #16\n\t" - "ldr%?h %4, [%0], #4\n\t" - "orr%? %3, %3, %4, lsl #16\n\t" - "stm%?ia %1!, {%2, %3}" + "ldrh %2, [%0], #4\n\t" + "ldrh %4, [%0], #4\n\t" + "ldrh %3, [%0], #4\n\t" + "orr %2, %2, %4, lsl #16\n\t" + "ldrh %4, [%0], #4\n\t" + "orr %3, %3, %4, lsl #16\n\t" + "stmia %1!, {%2, %3}" : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) : "0" (offset), "1" (buf)); length -= 8; @@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned while (length > 0) { unsigned int tmp; asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? %2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" + "ldrh %2, [%0], #4\n\t" + "strb %2, [%1], #1\n\t" + "mov %2, %2, lsr #8\n\t" + "strb %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); length -= 2; } diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 256f590f6bb1..3a7ebfdda57d 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int /* Make certain the data structures used by the LANCE are aligned and DMAble. */ lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); - if(lp==NULL) - return -ENODEV; + if (!lp) + return -ENOMEM; if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); dev->ml_priv = lp; lp->name = chipname; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b6fa89102526..bbef95973c27 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -768,12 +768,16 @@ #define MTL_Q_TQDR 0x08 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 -#define MTL_Q_RQDR 0x4c +#define MTL_Q_RQDR 0x48 #define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_INDEX 16 +#define MTL_Q_RQDR_PRXQ_WIDTH 14 +#define MTL_Q_RQDR_RXQSTS_INDEX 4 +#define MTL_Q_RQDR_RXQSTS_WIDTH 2 #define MTL_Q_RQFCR_RFA_INDEX 1 #define MTL_Q_RQFCR_RFA_WIDTH 6 #define MTL_Q_RQFCR_RFD_INDEX 17 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index a6b9899e285f..895d35639129 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -146,6 +146,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int i, tc_ets, tc_ets_weight; + u8 max_tc = 0; tc_ets = 0; tc_ets_weight = 0; @@ -157,12 +158,9 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); - if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && - (i >= pdata->hw_feat.tc_cnt)) - return -EINVAL; - - if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) - return -EINVAL; + max_tc = max_t(u8, max_tc, ets->prio_tc[i]); + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i])) + max_tc = max_t(u8, max_tc, i); switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: @@ -171,15 +169,28 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, tc_ets = 1; tc_ets_weight += ets->tc_tx_bw[i]; break; - default: + netif_err(pdata, drv, netdev, + "unsupported TSA algorithm (%hhu)\n", + ets->tc_tsa[i]); return -EINVAL; } } + /* Check maximum traffic class requested */ + if (max_tc >= pdata->hw_feat.tc_cnt) { + netif_err(pdata, drv, netdev, + "exceeded number of supported traffic classes\n"); + return -EINVAL; + } + /* Weights must add up to 100% */ - if (tc_ets && (tc_ets_weight != 100)) + if (tc_ets && (tc_ets_weight != 100)) { + netif_err(pdata, drv, netdev, + "sum of ETS algorithm weights is not 100 (%u)\n", + tc_ets_weight); return -EINVAL; + } if (!pdata->ets) { pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), @@ -188,6 +199,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, return -ENOMEM; } + pdata->num_tcs = max_tc + 1; memcpy(pdata->ets, ets, sizeof(*pdata->ets)); pdata->hw_if.config_dcb_tc(pdata); @@ -221,6 +233,13 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n", pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + /* Check PFC for supported number of traffic classes */ + if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) { + netif_err(pdata, drv, netdev, + "PFC requested for unsupported traffic class\n"); + return -EINVAL; + } + if (!pdata->pfc) { pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), GFP_KERNEL); diff --git 
a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f6a7161e3b85..1babcc11a248 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -518,13 +518,45 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { + struct ieee_pfc *pfc = pdata->pfc; + struct ieee_ets *ets = pdata->ets; unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ - for (i = 0; i < pdata->rx_q_count; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + if (pfc && ets) { + unsigned int prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + unsigned int tc; + + /* Does this queue handle the priority? */ + if (pdata->prio2q_map[prio] != i) + continue; + + /* Get the Traffic Class for this priority */ + tc = ets->prio_tc[prio]; + + /* Check if flow control should be enabled */ + if (pfc->pfc_en & (1 << tc)) { + ehfc = 1; + break; + } + } + } else { + ehfc = 1; + } + + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + + netif_dbg(pdata, drv, pdata->netdev, + "flow control %s for RXq%u\n", + ehfc ? "enabled" : "disabled", i); + } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; @@ -702,6 +734,113 @@ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) return 0; } +static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + /* Put the VLAN tag in the Rx descriptor */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); + + /* Don't check the VLAN type */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); + + /* Check only C-TAG (0x8100) packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); + + /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); + + /* Enable VLAN tag stripping */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); + + return 0; +} + +static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); + + return 0; +} + +static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Enable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + + /* Enable VLAN Hash Table filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); + + /* Disable VLAN tag inverse matching */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); + + /* Only filter on the lower 12-bits of the VLAN tag */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); + + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. 
+ */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); + + return 0; +} + +static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Disable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + + return 0; +} + +static u32 xgbe_vid_crc32_le(__le16 vid_le) +{ + u32 poly = 0xedb88320; /* CRCPOLY_LE */ + u32 crc = ~0; + u32 temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +{ + u32 crc; + u16 vid; + __le16 vid_le; + u16 vlan_hash_table = 0; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + /* Set the VLAN Hash Table filtering register */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); + + return 0; +} + static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable) { @@ -714,6 +853,14 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, enable ? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); + /* Hardware will still perform VLAN filtering in promiscuous mode */ + if (enable) { + xgbe_disable_rx_vlan_filtering(pdata); + } else { + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xgbe_enable_rx_vlan_filtering(pdata); + } + return 0; } @@ -875,6 +1022,7 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { + unsigned long flags; unsigned int mmd_address; int mmd_data; @@ -892,10 +1040,10 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. */ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return mmd_data; } @@ -904,6 +1052,7 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned int mmd_address; + unsigned long flags; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; @@ -919,10 +1068,10 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. 
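Note on the VLAN filtering hunks above: xgbe_vid_crc32_le() runs a bit-reflected CRC-32 over the 12 valid VID bits, and the driver then takes the top four bits of bitrev32(~crc) to pick one of sixteen hash-table positions. A standalone user-space sketch of the same arithmetic, assuming a little-endian host so a plain uint16_t matches cpu_to_le16():

#include <stdint.h>
#include <stdio.h>

/* Same walk as the driver's xgbe_vid_crc32_le(): bit-reflected CRC-32 over
 * the 12 valid VID bits (get_bitmask_order(VLAN_VID_MASK) == 12).
 */
static uint32_t vid_crc32_le(uint16_t vid_le)
{
        const uint32_t poly = 0xedb88320;       /* CRC-32 polynomial, reflected */
        const unsigned char *data = (const unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        uint32_t crc = ~0u;
        int i;

        for (i = 0; i < 12; i++) {
                uint32_t temp;

                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;
                if (temp)
                        crc ^= poly;
        }

        return crc;
}

static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                r |= ((x >> i) & 1u) << (31 - i);

        return r;
}

int main(void)
{
        uint16_t vid;

        for (vid = 1; vid <= 4; vid++)
                printf("VID %u -> hash bit %u\n", vid,
                       bitrev32(~vid_crc32_le(vid)) >> 28);

        return 0;
}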
*/ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) @@ -944,116 +1093,6 @@ static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) return 0; } -static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - /* Put the VLAN tag in the Rx descriptor */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); - - /* Don't check the VLAN type */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); - - /* Check only C-TAG (0x8100) packets */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); - - /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); - - /* Enable VLAN tag stripping */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); - - return 0; -} - -static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); - - return 0; -} - -static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Enable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); - - /* Enable VLAN Hash Table filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); - - /* Disable VLAN tag inverse matching */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); - - /* Only filter on the lower 12-bits of the VLAN tag */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); - - /* In order for the VLAN Hash Table filtering to be effective, - * the VLAN tag identifier in the VLAN Tag Register must not - * be zero. Set the VLAN tag identifier to "1" to enable the - * VLAN Hash Table filtering. This implies that a VLAN tag of - * 1 will always pass filtering. 
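Note on the XPCS hunks above: the indirect MMD accessors switch from a mutex to spin_lock_irqsave() so they can be reached from interrupt context. A minimal sketch of the resulting access pattern, with a hypothetical my_pcs context and a fixed window-select offset standing in for the driver's XPCS_IOREAD/XPCS_IOWRITE macros:

#include <linux/spinlock.h>
#include <linux/io.h>

/* Hypothetical context; the driver keeps xpcs_lock in struct xgbe_prv_data. */
struct my_pcs {
        spinlock_t lock;                /* was a struct mutex before this change */
        void __iomem *base;
};

/* Indirect read: select the register window, then read the offset.  The
 * spinlock with IRQs disabled lets this path be used from interrupt context,
 * which a mutex would not allow.
 */
static u32 my_pcs_read(struct my_pcs *pcs, u32 window, u32 offset)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&pcs->lock, flags);
        iowrite32(window, pcs->base);           /* hypothetical select register at 0x0 */
        val = ioread32(pcs->base + offset);
        spin_unlock_irqrestore(&pcs->lock, flags);

        return val;
}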
- */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); - - return 0; -} - -static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Disable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); - - return 0; -} - -#ifndef CRCPOLY_LE -#define CRCPOLY_LE 0xedb88320 -#endif -static u32 xgbe_vid_crc32_le(__le16 vid_le) -{ - u32 poly = CRCPOLY_LE; - u32 crc = ~0; - u32 temp = 0; - unsigned char *data = (unsigned char *)&vid_le; - unsigned char data_byte = 0; - int i, bits; - - bits = get_bitmask_order(VLAN_VID_MASK); - for (i = 0; i < bits; i++) { - if ((i % 8) == 0) - data_byte = data[i / 8]; - - temp = ((crc & 1) ^ data_byte) & 1; - crc >>= 1; - data_byte >>= 1; - - if (temp) - crc ^= poly; - } - - return crc; -} - -static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) -{ - u32 crc; - u16 vid; - __le16 vid_le; - u16 vlan_hash_table = 0; - - /* Generate the VLAN Hash Table value */ - for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { - /* Get the CRC32 value of the VLAN ID */ - vid_le = cpu_to_le16(vid); - crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; - - vlan_hash_table |= (1 << crc); - } - - /* Set the VLAN Hash Table filtering register */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); - - return 0; -} - static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc *rdesc = rdata->rdesc; @@ -1288,11 +1327,42 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, return 0; } +static void xgbe_config_tc(struct xgbe_prv_data *pdata) +{ + unsigned int offset, queue, prio; + u8 i; + + netdev_reset_tc(pdata->netdev); + if (!pdata->num_tcs) + return; + + netdev_set_num_tc(pdata->netdev, pdata->num_tcs); + + for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { + while ((queue < pdata->tx_q_count) && + (pdata->q2tc_map[queue] == i)) + queue++; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", + i, offset, queue - 1); + netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); + offset = queue; + } + + if (!pdata->ets) + return; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) + netdev_set_prio_tc_map(pdata->netdev, prio, + pdata->ets->prio_tc[prio]); +} + static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) { struct ieee_ets *ets = pdata->ets; unsigned int total_weight, min_weight, weight; - unsigned int i; + unsigned int mask, reg, reg_val; + unsigned int i, prio; if (!ets) return; @@ -1309,6 +1379,25 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) min_weight = 1; for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + /* Map the priorities to the traffic class */ + mask = 0; + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + if (ets->prio_tc[prio] == i) + mask |= (1 << prio); + } + mask &= 0xff; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", + i, mask); + reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); + reg_val = XGMAC_IOREAD(pdata, reg); + + reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); + reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); + + XGMAC_IOWRITE(pdata, reg, reg_val); + + /* Set the traffic class algorithm */ switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: netif_dbg(pdata, drv, pdata->netdev, @@ -1329,38 +1418,12 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) break; } } + + xgbe_config_tc(pdata); } static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) { - struct ieee_pfc *pfc = pdata->pfc; - struct ieee_ets *ets = pdata->ets; - unsigned int mask, reg, 
reg_val; - unsigned int tc, prio; - - if (!pfc || !ets) - return; - - for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { - mask = 0; - for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { - if ((pfc->pfc_en & (1 << prio)) && - (ets->prio_tc[prio] == tc)) - mask |= (1 << prio); - } - mask &= 0xff; - - netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n", - tc, mask); - reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); - reg_val = XGMAC_IOREAD(pdata, reg); - - reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - - XGMAC_IOWRITE(pdata, reg, reg_val); - } - xgbe_config_flow_control(pdata); } @@ -2595,6 +2658,32 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) } } +static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... + */ + rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, rx_timeout)) { + rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + static void xgbe_enable_rx(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; @@ -2633,6 +2722,10 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < pdata->rx_q_count; i++) + xgbe_prepare_rx_stop(pdata, i); + /* Disable each Rx queue */ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); @@ -2881,6 +2974,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; /* For Data Center Bridging config */ + hw_if->config_tc = xgbe_config_tc; hw_if->config_dcb_tc = xgbe_config_dcb_tc; hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8a9b493566c9..ebf9224b2d31 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -356,7 +356,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule_irqoff(&pdata->napi); } } @@ -409,7 +409,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data) disable_irq_nosync(channel->dma_irq); /* Turn on polling */ - __napi_schedule(&channel->napi); + __napi_schedule_irqoff(&channel->napi); } return IRQ_HANDLED; @@ -1626,30 +1626,22 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u8 tc) +static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - unsigned int offset, queue; - u8 i; + u8 tc; - if (tc && (tc != pdata->hw_feat.tc_cnt)) + if (tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; - if (tc) { - netdev_set_num_tc(netdev, tc); - for (i = 0, queue = 0, offset = 0; i < tc; i++) { - while ((queue < pdata->tx_q_count) && - (pdata->q2tc_map[queue] == i)) - queue++; - - netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n", - i, offset, queue - 1); - netdev_set_tc_queue(netdev, i, queue - offset, offset); - offset = queue; - } - } else { - netdev_reset_tc(netdev); - } + tc = tc_to_netdev->tc; + + if (tc > pdata->hw_feat.tc_cnt) + return -EINVAL; + + pdata->num_tcs = tc; + pdata->hw_if.config_tc(pdata); return 0; } @@ -2062,7 +2054,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ enable_irq(channel->dma_irq); @@ -2104,7 +2096,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ xgbe_enable_rx_tx_ints(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6040293db9c1..11d9f0c5b78b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
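Note on the xgbe_setup_tc() hunk above: it adopts the 4.5-era three-argument ndo_setup_tc() callback that takes a struct tc_to_netdev and only honours TC_SETUP_MQPRIO. A minimal sketch of that handler shape; my_priv and its fields are hypothetical stand-ins for the driver's pdata:

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical driver state; xgbe keeps hw_feat.tc_cnt and num_tcs in pdata. */
struct my_priv {
        u8 max_tcs;
        u8 num_tcs;
};

static int my_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                       struct tc_to_netdev *tc)
{
        struct my_priv *priv = netdev_priv(netdev);

        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;

        if (tc->tc > priv->max_tcs)
                return -EINVAL;

        priv->num_tcs = tc->tc;
        /* reprogram netdev_set_num_tc()/netdev_set_tc_queue() mappings here */

        return 0;
}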
* * Redistribution and use in source and binary forms, with or without @@ -318,8 +318,20 @@ static int xgbe_set_settings(struct net_device *netdev, if (cmd->autoneg == AUTONEG_DISABLE) { switch (speed) { case SPEED_10000: + break; case SPEED_2500: + if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } + break; case SPEED_1000: + if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } break; default: netdev_err(netdev, "unsupported speed %u\n", speed); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 618d952c2984..3eee3201b58f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -363,7 +363,7 @@ static int xgbe_probe(struct platform_device *pdev) platform_set_drvdata(pdev, netdev); spin_lock_init(&pdata->lock); - mutex_init(&pdata->xpcs_mutex); + spin_lock_init(&pdata->xpcs_lock); mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 446058081866..84c5d296d13e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -626,10 +626,22 @@ static irqreturn_t xgbe_an_isr(int irq, void *data) netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n"); - /* Interrupt reason must be read and cleared outside of IRQ context */ - disable_irq_nosync(pdata->an_irq); + /* Disable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - queue_work(pdata->an_workqueue, &pdata->an_irq_work); + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + + queue_work(pdata->an_workqueue, &pdata->an_irq_work); + } else { + /* Enable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + XGBE_AN_INT_MASK); + } return IRQ_HANDLED; } @@ -673,34 +685,26 @@ static void xgbe_an_state_machine(struct work_struct *work) struct xgbe_prv_data, an_work); enum xgbe_an cur_state = pdata->an_state; - unsigned int int_reg, int_mask; mutex_lock(&pdata->an_mutex); - /* Read the interrupt */ - int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - if (!int_reg) + if (!pdata->an_int) goto out; next_int: - if (int_reg & XGBE_AN_PG_RCV) { + if (pdata->an_int & XGBE_AN_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; - int_mask = XGBE_AN_PG_RCV; - } else if (int_reg & XGBE_AN_INC_LINK) { + pdata->an_int &= ~XGBE_AN_PG_RCV; + } else if (pdata->an_int & XGBE_AN_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; - int_mask = XGBE_AN_INC_LINK; - } else if (int_reg & XGBE_AN_INT_CMPLT) { + pdata->an_int &= ~XGBE_AN_INC_LINK; + } else if (pdata->an_int & XGBE_AN_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; - int_mask = XGBE_AN_INT_CMPLT; + pdata->an_int &= ~XGBE_AN_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; - int_mask = 0; } - /* Clear the interrupt to be processed */ - int_reg &= ~int_mask; - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg); - pdata->an_result = pdata->an_state; again: @@ -740,14 +744,14 @@ again: } if (pdata->an_state == XGBE_AN_NO_LINK) { - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } else if (pdata->an_state == XGBE_AN_ERROR) { netdev_err(pdata->netdev, "error during auto-negotiation, state=%u\n", cur_state); - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } @@ -765,11 +769,12 @@ again: if (cur_state != pdata->an_state) goto again; - if (int_reg) + if (pdata->an_int) goto next_int; out: - enable_irq(pdata->an_irq); + /* Enable AN interrupts on the way out */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK); mutex_unlock(&pdata->an_mutex); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e234b9970318..98d9d63c4353 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
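Note on the xgbe_an_isr() hunk above: instead of disable_irq_nosync(), the handler now masks the AN interrupt at the device, latches the cause bits into pdata->an_int, acks them, and defers the state machine to the workqueue. A rough sketch of that mask/latch/defer shape, using plain struct fields as stand-ins for the MDIO AN registers the driver touches via XMDIO_READ()/XMDIO_WRITE():

#include <linux/interrupt.h>
#include <linux/workqueue.h>

#define MY_AN_INT_MASK  0x07            /* hypothetical: the three AN event bits */

/* Stand-in context: int_mask/int_status model the AN interrupt registers. */
struct my_an {
        u32 int_mask;
        u32 int_status;
        unsigned int an_int;            /* cause bits latched for the work handler */
        struct work_struct an_work;
};

static irqreturn_t my_an_isr(int irq, void *data)
{
        struct my_an *an = data;

        an->int_mask = 0;                       /* mask further AN interrupts */
        an->an_int = an->int_status;            /* latch what fired */

        if (an->an_int) {
                an->int_status &= ~an->an_int;  /* ack at the source */
                schedule_work(&an->an_work);    /* state machine runs later */
        } else {
                an->int_mask = MY_AN_INT_MASK;  /* nothing pending: re-enable */
        }

        return IRQ_HANDLED;
}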
* * Redistribution and use in source and binary forms, with or without @@ -673,6 +673,7 @@ struct xgbe_hw_if { u64 (*get_tx_tstamp)(struct xgbe_prv_data *); /* For Data Center Bridging config */ + void (*config_tc)(struct xgbe_prv_data *); void (*config_dcb_tc)(struct xgbe_prv_data *); void (*config_dcb_pfc)(struct xgbe_prv_data *); @@ -773,8 +774,8 @@ struct xgbe_prv_data { /* Overall device lock */ spinlock_t lock; - /* XPCS indirect addressing mutex */ - struct mutex xpcs_mutex; + /* XPCS indirect addressing lock */ + spinlock_t xpcs_lock; /* RSS addressing mutex */ struct mutex rss_mutex; @@ -880,6 +881,7 @@ struct xgbe_prv_data { struct ieee_pfc *pfc; unsigned int q2tc_map[XGBE_MAX_QUEUES]; unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; + u8 num_tcs; /* Hardware features of the device */ struct xgbe_hw_features hw_feat; @@ -925,6 +927,7 @@ struct xgbe_prv_data { u32 serdes_dfe_tap_ena[XGBE_SPEEDS]; /* Auto-negotiation state machine support */ + unsigned int an_int; struct mutex an_mutex; enum xgbe_an an_result; enum xgbe_an an_state; diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile index 700b5abe5de5..f46321f68315 100644 --- a/drivers/net/ethernet/apm/xgene/Makefile +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -3,5 +3,6 @@ # xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ - xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o + xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o \ + xgene_enet_cle.o obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c new file mode 100644 index 000000000000..b212488606da --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -0,0 +1,734 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh <kdinh@apm.com> + * Tanmay Inamdar <tinamdar@apm.com> + * Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "xgene_enet_main.h" + +/* interfaces to convert structures to HW recognized bit formats */ +static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, + enum xgene_cle_prot_type type, u32 len, + u32 *reg) +{ + *reg = SET_VAL(SB_IPFRAG, frag) | + SET_VAL(SB_IPPROT, type) | + SET_VAL(SB_IPVER, ver) | + SET_VAL(SB_HDRLEN, len); +} + +static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, + u32 nfpsel, u32 *idt_reg) +{ + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL, fpsel) | + SET_VAL(IDT_NFPSEL, nfpsel); +} + +static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, + struct xgene_cle_dbptr *dbptr, u32 *buf) +{ + buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | + SET_VAL(CLE_DSTQIDL, dbptr->dstqid); + + buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) | + SET_VAL(CLE_PRIORITY, dbptr->cle_priority); +} + +static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf) +{ + u32 i, j = 0; + u32 data; + + buf[j++] = SET_VAL(CLE_TYPE, kn->node_type); + for (i = 0; i < kn->num_keys; i++) { + struct xgene_cle_ptree_key *key = &kn->key[i]; + + if (!(i % 2)) { + buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + } else { + data = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + buf[j++] |= (data << 16); + } + } +} + +static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn, + u32 *buf, u32 jb) +{ + struct xgene_cle_ptree_branch *br; + u32 i, j = 0; + u32 npp; + + buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) | + SET_VAL(CLE_DN_LASTN, dn->last_node) | + SET_VAL(CLE_DN_HLS, dn->hdr_len_store) | + SET_VAL(CLE_DN_EXT, dn->hdr_extn) | + SET_VAL(CLE_DN_BSTOR, dn->byte_store) | + SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) | + SET_VAL(CLE_DN_RPTR, dn->result_pointer); + + for (i = 0; i < dn->num_branches; i++) { + br = &dn->branch[i]; + npp = br->next_packet_pointer; + + if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE)) + npp += jb; + + buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) | + SET_VAL(CLE_BR_NPPTR, npp) | + SET_VAL(CLE_BR_JB, br->jump_bw) | + SET_VAL(CLE_BR_JR, br->jump_rel) | + SET_VAL(CLE_BR_OP, br->operation) | + SET_VAL(CLE_BR_NNODE, br->next_node) | + SET_VAL(CLE_BR_NBR, br->next_branch); + + buf[j++] = SET_VAL(CLE_BR_DATA, br->data) | + SET_VAL(CLE_BR_MASK, br->mask); + } +} + +static int xgene_cle_poll_cmd_done(void __iomem *base, + enum xgene_cle_cmd_type cmd) +{ + u32 status, loop = 10; + int ret = -EBUSY; + + while (loop--) { + status = ioread32(base + INDCMD_STATUS); + if (status & cmd) { + ret = 0; + break; + } + usleep_range(1000, 2000); + } + + return ret; +} + +static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs, + u32 index, enum xgene_cle_dram_type type, + enum xgene_cle_cmd_type cmd) +{ + enum xgene_cle_parser parser = cle->active_parser; + void __iomem *base = cle->base; + u32 i, j, ind_addr; + u8 port, nparsers; + int ret = 0; + + /* PTREE_RAM onwards, DRAM regions are common for all parsers */ + nparsers = (type >= PTREE_RAM) ? 
1 : cle->parsers; + + for (i = 0; i < nparsers; i++) { + port = i; + if ((type < PTREE_RAM) && (parser != PARSER_ALL)) + port = parser; + + ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index; + iowrite32(ind_addr, base + INDADDR); + for (j = 0; j < nregs; j++) + iowrite32(data[j], base + DATA_RAM0 + (j * 4)); + iowrite32(cmd, base + INDCMD); + + ret = xgene_cle_poll_cmd_done(base, cmd); + if (ret) + break; + } + + return ret; +} + +static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + void __iomem *addr, *base = cle->base; + u32 offset = CLE_PORT_OFFSET; + u32 i; + + /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */ + ptree->start_pkt += cle->jump_bytes; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) + addr = base + cle->active_parser * offset; + else + addr = base + (i * offset); + + iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0); + iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0); + } +} + +static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + u32 buf[CLE_DRAM_REGS]; + u32 i; + int ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dbptr; i++) { + xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf); + ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr, + DB_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + struct xgene_cle_ptree_ewdn *dn = ptree->dn; + struct xgene_cle_ptree_kn *kn = ptree->kn; + u32 buf[CLE_DRAM_REGS]; + int i, j, ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dn; i++) { + xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes); + ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + /* continue node index for key node */ + memset(buf, 0, sizeof(buf)); + for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) { + xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf); + ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + int ret; + + ret = xgene_cle_setup_node(pdata, cle); + if (ret) + return ret; + + ret = xgene_cle_setup_dbptr(pdata, cle); + if (ret) + return ret; + + xgene_cle_enable_ptree(pdata, cle); + + return 0; +} + +static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *enet_cle, + struct xgene_cle_dbptr *dbptr, + u32 index, u8 priority) +{ + void __iomem *base = enet_cle->base; + void __iomem *base_addr; + u32 buf[CLE_DRAM_REGS]; + u32 def_cls, offset; + u32 i, j; + + memset(buf, 0, sizeof(buf)); + xgene_cle_dbptr_to_hw(pdata, dbptr, buf); + + for (i = 0; i < enet_cle->parsers; i++) { + if (enet_cle->active_parser != PARSER_ALL) { + offset = enet_cle->active_parser * + CLE_PORT_OFFSET; + } else { + offset = i * CLE_PORT_OFFSET; + } + + base_addr = base + DFCLSRESDB00 + offset; + for (j = 0; j < 6; j++) + iowrite32(buf[j], base_addr + (j * 4)); + + def_cls = ((priority & 0x7) << 10) | (index & 0x3ff); + iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset); + } +} + +static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle) +{ + u32 idx = 
CLE_PKTRAM_SIZE / sizeof(u32); + u32 mac_hdr_len = ETH_HLEN; + u32 sband, reg = 0; + u32 ipv4_ihl = 5; + u32 hdr_len; + int ret; + + /* Sideband: IPV4/TCP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, ®); + sband = reg; + + /* Sideband: IPv4/UDP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, ®); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + /* Sideband: IPv4/RAW packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, ®); + sband = reg; + + /* Sideband: Ethernet II/RAW packets */ + hdr_len = (mac_hdr_len << 5); + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, ®); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle) +{ + u32 secret_key_ipv4[4]; /* 16 Bytes*/ + int ret = 0; + + get_random_bytes(secret_key_ipv4, 16); + ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0, + RSS_IPV4_HASH_SKEY, CLE_CMD_WR); + return ret; +} + +static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) +{ + u32 fpsel, dstqid, nfpsel, idt_reg, idx; + int i, ret = 0; + u16 pool_id; + + for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) { + idx = i % pdata->rxq_cnt; + pool_id = pdata->rx_ring[idx]->buf_pool->id; + fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]); + nfpsel = 0; + idt_reg = 0; + + xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg); + ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, + RSS_IDT, CLE_CMD_WR); + if (ret) + return ret; + } + + ret = xgene_cle_set_rss_skeys(&pdata->cle); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_cle *cle = &pdata->cle; + void __iomem *base = cle->base; + u32 offset, val = 0; + int i, ret = 0; + + offset = CLE_PORT_OFFSET; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) + offset = cle->active_parser * CLE_PORT_OFFSET; + else + offset = i * CLE_PORT_OFFSET; + + /* enable RSS */ + val = (RSS_IPV4_12B << 1) | 0x1; + writel(val, base + RSS_CTRL0 + offset); + } + + /* setup sideband data */ + ret = xgene_cle_set_rss_sband(cle); + if (ret) + return ret; + + /* setup indirection table */ + ret = xgene_cle_set_rss_idt(pdata); + if (ret) + return ret; + + return 0; +} + +static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_cle *enet_cle = &pdata->cle; + struct xgene_cle_dbptr dbptr[DB_MAX_PTRS]; + struct xgene_cle_ptree_branch *br; + u32 def_qid, def_fpsel, pool_id; + struct xgene_cle_ptree *ptree; + struct xgene_cle_ptree_kn kn; + int ret; + struct xgene_cle_ptree_ewdn ptree_dn[] = { + { + /* PKT_TYPE_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 2, + .branch = { + { + /* IPV4 */ + .valid = 0, + .next_packet_pointer = 22, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = PKT_PROT_NODE, + .next_branch = 0, + .data = 0x8, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 262, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + 
.operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + }, + }, + { + /* PKT_PROT_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 3, + .branch = { + { + /* TCP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 0, + .data = 0x0600, + .mask = 0xffff + }, + { + /* UDP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 0, + .data = 0x1100, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 260, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_TCP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP DST Port */ + .valid = 0, + .next_packet_pointer = 256, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_UDP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = 
JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP DST Port */ + .valid = 0, + .next_packet_pointer = 256, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* LAST NODE */ + .node_type = EWDN, + .last_node = 1, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 1, + .branch = { + { + .valid = 0, + .next_packet_pointer = 0, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = MAX_NODES, + .next_branch = 0, + .data = 0, + .mask = 0xffff + } + } + } + }; + + ptree = &enet_cle->ptree; + ptree->start_pkt = 12; /* Ethertype */ + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + ret = xgene_cle_setup_rss(pdata); + if (ret) { + netdev_err(pdata->ndev, "RSS initialization failed\n"); + return ret; + } + } else { + br = &ptree_dn[PKT_PROT_NODE].branch[0]; + br->valid = 0; + br->next_packet_pointer = 260; + br->next_node = LAST_NODE; + br->data = 0x0000; + br->mask = 0xffff; + } + + def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + pool_id = pdata->rx_ring[0]->buf_pool->id; + def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + + memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS); + dbptr[DB_RES_ACCEPT].fpsel = def_fpsel; + dbptr[DB_RES_ACCEPT].dstqid = def_qid; + dbptr[DB_RES_ACCEPT].cle_priority = 1; + + dbptr[DB_RES_DEF].fpsel = def_fpsel; + dbptr[DB_RES_DEF].dstqid = def_qid; + dbptr[DB_RES_DEF].cle_priority = 7; + xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF], + DB_RES_ACCEPT, 7); + + dbptr[DB_RES_DROP].drop = 1; + + memset(&kn, 0, sizeof(kn)); + kn.node_type = KN; + kn.num_keys = 1; + kn.key[0].priority = 0; + kn.key[0].result_pointer = DB_RES_ACCEPT; + + ptree->dn = ptree_dn; + ptree->kn = &kn; + ptree->dbptr = dbptr; + ptree->num_dn = MAX_NODES; + ptree->num_kn = 1; + ptree->num_dbptr = DB_MAX_PTRS; + + return xgene_cle_setup_ptree(pdata, enet_cle); +} + +struct xgene_cle_ops xgene_cle3in_ops = { + .cle_init = xgene_enet_cle_init, +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h new file mode 100644 index 000000000000..29a17abdd828 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -0,0 +1,295 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh <kdinh@apm.com> + * Tanmay Inamdar <tinamdar@apm.com> + * Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __XGENE_ENET_CLE_H__ +#define __XGENE_ENET_CLE_H__ + +#include <linux/io.h> +#include <linux/random.h> + +/* Register offsets */ +#define INDADDR 0x04 +#define INDCMD 0x08 +#define INDCMD_STATUS 0x0c +#define DATA_RAM0 0x10 +#define SNPTR0 0x0100 +#define SPPTR0 0x0104 +#define DFCLSRESDBPTR0 0x0108 +#define DFCLSRESDB00 0x010c +#define RSS_CTRL0 0x0000013c + +#define CLE_CMD_TO 10 /* ms */ +#define CLE_PKTRAM_SIZE 256 /* bytes */ +#define CLE_PORT_OFFSET 0x200 +#define CLE_DRAM_REGS 17 + +#define CLE_DN_TYPE_LEN 2 +#define CLE_DN_TYPE_POS 0 +#define CLE_DN_LASTN_LEN 1 +#define CLE_DN_LASTN_POS 2 +#define CLE_DN_HLS_LEN 1 +#define CLE_DN_HLS_POS 3 +#define CLE_DN_EXT_LEN 2 +#define CLE_DN_EXT_POS 4 +#define CLE_DN_BSTOR_LEN 2 +#define CLE_DN_BSTOR_POS 6 +#define CLE_DN_SBSTOR_LEN 2 +#define CLE_DN_SBSTOR_POS 8 +#define CLE_DN_RPTR_LEN 12 +#define CLE_DN_RPTR_POS 12 + +#define CLE_BR_VALID_LEN 1 +#define CLE_BR_VALID_POS 0 +#define CLE_BR_NPPTR_LEN 9 +#define CLE_BR_NPPTR_POS 1 +#define CLE_BR_JB_LEN 1 +#define CLE_BR_JB_POS 10 +#define CLE_BR_JR_LEN 1 +#define CLE_BR_JR_POS 11 +#define CLE_BR_OP_LEN 3 +#define CLE_BR_OP_POS 12 +#define CLE_BR_NNODE_LEN 9 +#define CLE_BR_NNODE_POS 15 +#define CLE_BR_NBR_LEN 5 +#define CLE_BR_NBR_POS 24 + +#define CLE_BR_DATA_LEN 16 +#define CLE_BR_DATA_POS 0 +#define CLE_BR_MASK_LEN 16 +#define CLE_BR_MASK_POS 16 + +#define CLE_KN_PRIO_POS 0 +#define CLE_KN_PRIO_LEN 3 +#define CLE_KN_RPTR_POS 3 +#define CLE_KN_RPTR_LEN 10 +#define CLE_TYPE_POS 0 +#define CLE_TYPE_LEN 2 + +#define CLE_DSTQIDL_POS 25 +#define CLE_DSTQIDL_LEN 7 +#define CLE_DSTQIDH_POS 0 +#define CLE_DSTQIDH_LEN 5 +#define CLE_FPSEL_POS 21 +#define CLE_FPSEL_LEN 4 +#define CLE_PRIORITY_POS 5 +#define CLE_PRIORITY_LEN 3 + +#define JMP_ABS 0 +#define JMP_REL 1 +#define JMP_FW 0 +#define JMP_BW 1 + +enum xgene_cle_ptree_nodes { + PKT_TYPE_NODE, + PKT_PROT_NODE, + RSS_IPV4_TCP_NODE, + RSS_IPV4_UDP_NODE, + LAST_NODE, + MAX_NODES +}; + +enum xgene_cle_byte_store { + NO_BYTE, + FIRST_BYTE, + SECOND_BYTE, + BOTH_BYTES +}; + +/* Preclassification operation types */ +enum xgene_cle_node_type { + INV, + KN, + EWDN, + RES_NODE +}; + +/* Preclassification operation types */ +enum xgene_cle_op_type { + EQT, + NEQT, + LTEQT, + GTEQT, + AND, + NAND +}; + +enum xgene_cle_parser { + PARSER0, + PARSER1, + PARSER2, + PARSER_ALL +}; + +#define XGENE_CLE_DRAM(type) (((type) & 0xf) << 28) +enum xgene_cle_dram_type { + PKT_RAM, + RSS_IDT, + RSS_IPV4_HASH_SKEY, + PTREE_RAM = 0xc, + AVL_RAM, + DB_RAM +}; + +enum xgene_cle_cmd_type { + CLE_CMD_WR = 1, + CLE_CMD_RD = 2, + CLE_CMD_AVL_ADD = 8, + CLE_CMD_AVL_DEL = 16, + CLE_CMD_AVL_SRCH = 32 +}; + +enum xgene_cle_ipv4_rss_hashtype { + RSS_IPV4_8B, + RSS_IPV4_12B, +}; + +enum xgene_cle_prot_type { + XGENE_CLE_TCP, + XGENE_CLE_UDP, + XGENE_CLE_ESP, + XGENE_CLE_OTHER +}; + +enum xgene_cle_prot_version { + XGENE_CLE_IPV4, +}; + +enum xgene_cle_ptree_dbptrs { + DB_RES_DROP, + DB_RES_DEF, + DB_RES_ACCEPT, + DB_MAX_PTRS +}; + +/* RSS sideband signal info */ +#define SB_IPFRAG_POS 0 +#define SB_IPFRAG_LEN 1 +#define SB_IPPROT_POS 1 +#define SB_IPPROT_LEN 2 +#define SB_IPVER_POS 3 +#define SB_IPVER_LEN 1 +#define SB_HDRLEN_POS 4 +#define SB_HDRLEN_LEN 12 + +/* RSS indirection table */ +#define XGENE_CLE_IDT_ENTRIES 128 +#define IDT_DSTQID_POS 0 +#define IDT_DSTQID_LEN 12 +#define IDT_FPSEL_POS 12 +#define IDT_FPSEL_LEN 4 
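The classifier header above expresses every node, branch and result field as a position/length pair (CLE_DN_*, CLE_BR_*, CLE_DSTQID*, IDT_*), and the driver packs those fields into 32-bit words before pushing them into the CLE RAMs through the indirect INDADDR/INDCMD interface. The stand-alone C sketch below illustrates that packing for a single RSS indirection-table entry built from the IDT_* fields defined above; set_field() is a hypothetical stand-in for the driver's own field macros, and the queue/free-pool numbers are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Field positions/lengths copied from the header above. */
#define IDT_DSTQID_POS	0
#define IDT_DSTQID_LEN	12
#define IDT_FPSEL_POS	12
#define IDT_FPSEL_LEN	4
#define IDT_NFPSEL_POS	16
#define IDT_NFPSEL_LEN	4

/* Hypothetical helper mirroring the shift-and-mask packing the driver uses. */
static uint32_t set_field(uint32_t word, unsigned int pos, unsigned int len,
			  uint32_t val)
{
	uint32_t mask = ((1u << len) - 1) << pos;

	return (word & ~mask) | ((val << pos) & mask);
}

int main(void)
{
	uint32_t entry = 0;

	/* Map one RSS indirection slot to destination queue 5, free pool 2. */
	entry = set_field(entry, IDT_DSTQID_POS, IDT_DSTQID_LEN, 5);
	entry = set_field(entry, IDT_FPSEL_POS, IDT_FPSEL_LEN, 2);
	entry = set_field(entry, IDT_NFPSEL_POS, IDT_NFPSEL_LEN, 0);

	printf("IDT entry word: 0x%08x\n", (unsigned int)entry);
	return 0;
}

The same pattern applies to the branch and database-pointer words, only with the CLE_BR_* and CLE_DSTQID*/CLE_FPSEL* positions in place of the IDT_* ones.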
+#define IDT_NFPSEL_POS 16 +#define IDT_NFPSEL_LEN 4 + +struct xgene_cle_ptree_branch { + bool valid; + u16 next_packet_pointer; + bool jump_bw; + bool jump_rel; + u8 operation; + u16 next_node; + u8 next_branch; + u16 data; + u16 mask; +}; + +struct xgene_cle_ptree_ewdn { + u8 node_type; + bool last_node; + bool hdr_len_store; + u8 hdr_extn; + u8 byte_store; + u8 search_byte_store; + u16 result_pointer; + u8 num_branches; + struct xgene_cle_ptree_branch branch[6]; +}; + +struct xgene_cle_ptree_key { + u8 priority; + u16 result_pointer; +}; + +struct xgene_cle_ptree_kn { + u8 node_type; + u8 num_keys; + struct xgene_cle_ptree_key key[32]; +}; + +struct xgene_cle_dbptr { + u8 split_boundary; + u8 mirror_nxtfpsel; + u8 mirror_fpsel; + u16 mirror_dstqid; + u8 drop; + u8 mirror; + u8 hdr_data_split; + u64 hopinfomsbs; + u8 DR; + u8 HR; + u64 hopinfomlsbs; + u16 h0enq_num; + u8 h0fpsel; + u8 nxtfpsel; + u8 fpsel; + u16 dstqid; + u8 cle_priority; + u8 cle_flowgroup; + u8 cle_perflow; + u8 cle_insert_timestamp; + u8 stash; + u8 in; + u8 perprioen; + u8 perflowgroupen; + u8 perflowen; + u8 selhash; + u8 selhdrext; + u8 mirror_nxtfpsel_msb; + u8 mirror_fpsel_msb; + u8 hfpsel_msb; + u8 nxtfpsel_msb; + u8 fpsel_msb; +}; + +struct xgene_cle_ptree { + struct xgene_cle_ptree_ewdn *dn; + struct xgene_cle_ptree_kn *kn; + struct xgene_cle_dbptr *dbptr; + u32 num_dn; + u32 num_kn; + u32 num_dbptr; + u32 start_node; + u32 start_pkt; + u32 start_dbptr; +}; + +struct xgene_enet_cle { + void __iomem *base; + struct xgene_cle_ptree ptree; + enum xgene_cle_parser active_parser; + u32 parsers; + u32 max_nodes; + u32 max_dbptrs; + u32 jump_bytes; +}; + +extern struct xgene_cle_ops xgene_cle3in_ops; + +#endif /* __XGENE_ENET_CLE_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index db55c9f6e8e1..39e081a70f5b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -204,6 +204,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, struct xgene_enet_pdata *pdata, enum xgene_enet_err_code status) @@ -892,4 +903,5 @@ struct xgene_ring_ops xgene_ring1_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8a9091039ab4..ba7da98af2ef 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -54,6 +54,11 @@ enum xgene_enet_rm { #define IS_BUFFER_POOL BIT(20) #define PREFETCH_BUF_EN BIT(21) #define CSR_RING_ID_BUF 0x000c +#define CSR_PBM_COAL 0x0014 +#define CSR_PBM_CTICK1 0x001c +#define CSR_PBM_CTICK2 0x0020 +#define CSR_THRESHOLD0_SET1 0x0030 +#define CSR_THRESHOLD1_SET1 0x0034 #define CSR_RING_NE_INT_MODE 0x017c #define CSR_RING_CONFIG 0x006c #define CSR_RING_WR_BASE 0x0070 @@ -101,6 +106,7 @@ enum xgene_enet_rm { #define MAC_OFFSET 0x30 #define BLOCK_ETH_CSR_OFFSET 
0x2000 +#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000 #define BLOCK_ETH_RING_IF_OFFSET 0x9000 #define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000 #define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5eb9b20c0eea..8d4c1ad2fc60 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -93,13 +93,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, return 0; } -static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) -{ - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); - - return ((u16)pdata->rm << 10) | ring->num; -} - static u8 xgene_enet_hdr_len(const void *data) { const struct ethhdr *eth = data; @@ -189,7 +182,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static u64 xgene_enet_work_msg(struct sk_buff *skb) { struct net_device *ndev = skb->dev; - struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct iphdr *iph; u8 l3hlen = 0, l4hlen = 0; u8 ethhdr, proto = 0, csum_enable = 0; @@ -235,10 +227,6 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb) if (!mss || ((skb->len - hdr_len) <= mss)) goto out; - if (mss != pdata->mss) { - pdata->mss = mss; - pdata->mac_ops->set_mss(pdata); - } hopinfo |= SET_BIT(ET); } } else if (iph->protocol == IPPROTO_UDP) { @@ -420,7 +408,7 @@ out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; - pdata->tx_level += count; + pdata->tx_level[tx_ring->cp_ring->index] += count; tx_ring->tail = tail; return count; @@ -430,15 +418,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; - u32 tx_level = pdata->tx_level; + struct xgene_enet_desc_ring *tx_ring; + int index = skb->queue_mapping; + u32 tx_level = pdata->tx_level[index]; int count; - if (tx_level < pdata->txc_level) - tx_level += ((typeof(pdata->tx_level))~0U); + tx_ring = pdata->tx_ring[index]; + if (tx_level < pdata->txc_level[index]) + tx_level += ((typeof(pdata->tx_level[index]))~0U); - if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { - netif_stop_queue(ndev); + if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) { + netif_stop_subqueue(ndev, index); return NETDEV_TX_BUSY; } @@ -536,7 +526,8 @@ static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, int budget) { - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct net_device *ndev = ring->ndev; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; @@ -580,7 +571,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, desc_count++; processed++; if (is_completion) - pdata->txc_level += desc_count; + pdata->txc_level[ring->index] += desc_count; if (ret) break; @@ -590,8 +581,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; - if (netif_queue_stopped(ring->ndev)) - netif_start_queue(ring->ndev); + if (__netif_subqueue_stopped(ndev, ring->index)) + netif_start_subqueue(ndev, ring->index); } return processed; @@ -616,8 +607,16 @@ static int xgene_enet_napi(struct 
napi_struct *napi, const int budget) static void xgene_enet_timeout(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct netdev_queue *txq; + int i; pdata->mac_ops->reset(pdata); + + for (i = 0; i < pdata->txq_cnt; i++) { + txq = netdev_get_tx_queue(ndev, i); + txq->trans_start = jiffies; + netif_tx_start_queue(txq); + } } static int xgene_enet_register_irq(struct net_device *ndev) @@ -625,17 +624,21 @@ static int xgene_enet_register_irq(struct net_device *ndev) struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *ring; - int ret; + int ret = 0, i; - ring = pdata->rx_ring; - irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); - ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, - IRQF_SHARED, ring->irq_name, ring); - if (ret) - netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ring->irq_name, ring); + if (ret) { + netdev_err(ndev, "Failed to request irq %s\n", + ring->irq_name); + } + } - if (pdata->cq_cnt) { - ring = pdata->tx_ring->cp_ring; + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); @@ -653,15 +656,19 @@ static void xgene_enet_free_irq(struct net_device *ndev) struct xgene_enet_pdata *pdata; struct xgene_enet_desc_ring *ring; struct device *dev; + int i; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); - ring = pdata->rx_ring; - irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); - devm_free_irq(dev, ring->irq, ring); - if (pdata->cq_cnt) { - ring = pdata->tx_ring->cp_ring; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); + } + + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); devm_free_irq(dev, ring->irq, ring); } @@ -670,12 +677,15 @@ static void xgene_enet_free_irq(struct net_device *ndev) static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - napi_enable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_enable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_enable(napi); } } @@ -683,12 +693,15 @@ static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - napi_disable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_disable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_disable(napi); } } @@ -699,6 +712,14 @@ static int xgene_enet_open(struct net_device *ndev) const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int ret; + ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(ndev, 
pdata->rxq_cnt); + if (ret) + return ret; + mac_ops->tx_enable(pdata); mac_ops->rx_enable(pdata); @@ -721,6 +742,7 @@ static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; + int i; netif_stop_queue(ndev); @@ -734,7 +756,8 @@ static int xgene_enet_close(struct net_device *ndev) xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); - xgene_enet_process_ring(pdata->rx_ring, -1); + for (i = 0; i < pdata->rxq_cnt; i++) + xgene_enet_process_ring(pdata->rx_ring[i], -1); return 0; } @@ -754,18 +777,26 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) { struct xgene_enet_desc_ring *buf_pool; + struct xgene_enet_desc_ring *ring; + int i; - if (pdata->tx_ring) { - xgene_enet_delete_ring(pdata->tx_ring); - pdata->tx_ring = NULL; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + xgene_enet_delete_ring(ring); + pdata->tx_ring[i] = NULL; + } } - if (pdata->rx_ring) { - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_delete_bufpool(buf_pool); - xgene_enet_delete_ring(buf_pool); - xgene_enet_delete_ring(pdata->rx_ring); - pdata->rx_ring = NULL; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + buf_pool = ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(ring); + pdata->rx_ring[i] = NULL; + } } } @@ -820,24 +851,29 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; struct xgene_enet_desc_ring *ring; + int i; + + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + if (ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + if (ring->cp_ring && pdata->cq_cnt) + xgene_enet_free_desc_ring(ring->cp_ring); + xgene_enet_free_desc_ring(ring); + } + } - ring = pdata->tx_ring; - if (ring) { - if (ring->cp_ring && ring->cp_ring->cp_skb) - devm_kfree(dev, ring->cp_ring->cp_skb); - if (ring->cp_ring && pdata->cq_cnt) - xgene_enet_free_desc_ring(ring->cp_ring); - xgene_enet_free_desc_ring(ring); - } - - ring = pdata->rx_ring; - if (ring) { - if (ring->buf_pool) { - if (ring->buf_pool->rx_skb) - devm_kfree(dev, ring->buf_pool->rx_skb); - xgene_enet_free_desc_ring(ring->buf_pool); + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + if (ring->buf_pool) { + if (ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + } + xgene_enet_free_desc_ring(ring); } - xgene_enet_free_desc_ring(ring); } } @@ -950,104 +986,120 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; - int ret, size; - - /* allocate rx descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!rx_ring) { - ret = -ENOMEM; - goto err; - } + int i, ret, size; - /* allocate buffer pool for receiving packets */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); - buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_2KB, ring_id); - if (!buf_pool) { - ret = -ENOMEM; - goto err; - } - - 
rx_ring->nbufpool = NUM_BUFPOOL; - rx_ring->buf_pool = buf_pool; - rx_ring->irq = pdata->rx_irq; - if (!pdata->cq_cnt) { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", - ndev->name); - } else { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name); - } - buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, - sizeof(struct sk_buff *), GFP_KERNEL); - if (!buf_pool->rx_skb) { - ret = -ENOMEM; - goto err; - } - - buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); - rx_ring->buf_pool = buf_pool; - pdata->rx_ring = rx_ring; + for (i = 0; i < pdata->rxq_cnt; i++) { + /* allocate rx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } - /* allocate tx descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); - tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!tx_ring) { - ret = -ENOMEM; - goto err; - } + /* allocate buffer pool for receiving packets */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, + ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } - size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; - tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs, + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + rx_ring->irq = pdata->irqs[i]; + if (!pdata->cq_cnt) { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d", + ndev->name, i); + } + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); - if (!tx_ring->exp_bufs) { - ret = -ENOMEM; - goto err; - } + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } - pdata->tx_ring = tx_ring; + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring[i] = rx_ring; + } - if (!pdata->cq_cnt) { - cp_ring = pdata->rx_ring; - } else { - /* allocate tx completion descriptor ring */ - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + for (i = 0; i < pdata->txq_cnt; i++) { + /* allocate tx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); - if (!cp_ring) { + if (!tx_ring) { ret = -ENOMEM; goto err; } - cp_ring->irq = pdata->txc_irq; - snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name); - } - cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, - sizeof(struct sk_buff *), GFP_KERNEL); - if (!cp_ring->cp_skb) { - ret = -ENOMEM; - goto err; - } + size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; + tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, + &dma_exp_bufs, + GFP_KERNEL); + if (!tx_ring->exp_bufs) { + ret = -ENOMEM; + goto err; + } - size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; - cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, - size, GFP_KERNEL); - if (!cp_ring->frag_dma_addr) { - devm_kfree(dev, cp_ring->cp_skb); - ret = -ENOMEM; - goto err; - } + pdata->tx_ring[i] = tx_ring; - pdata->tx_ring->cp_ring = 
cp_ring; - pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + if (!pdata->cq_cnt) { + cp_ring = pdata->rx_ring[i]; + } else { + /* allocate tx completion descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, + cpu_bufnum++); + cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!cp_ring) { + ret = -ENOMEM; + goto err; + } - pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; + cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; + cp_ring->index = i; + snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d", + ndev->name, i); + } + + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } + + size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; + cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, + size, GFP_KERNEL); + if (!cp_ring->frag_dma_addr) { + devm_kfree(dev, cp_ring->cp_skb); + ret = -ENOMEM; + goto err; + } + + tx_ring->cp_ring = cp_ring; + tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + } + + pdata->ring_ops->coalesce(pdata->tx_ring[0]); + pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; return 0; @@ -1166,6 +1218,32 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) return 0; } +static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev = pdata->pdev; + struct device *dev = &pdev->dev; + int i, ret, max_irqs; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + max_irqs = 1; + else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) + max_irqs = 2; + else + max_irqs = XGENE_MAX_ENET_IRQ; + + for (i = 0; i < max_irqs; i++) { + ret = platform_get_irq(pdev, i); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->irqs[i] = ret; + } + + return 0; +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1247,25 +1325,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - ret = platform_get_irq(pdev, 0); - if (ret <= 0) { - dev_err(dev, "Unable to get ENET Rx IRQ\n"); - ret = ret ? 
: -ENXIO; + ret = xgene_enet_get_irqs(pdata); + if (ret) return ret; - } - pdata->rx_irq = ret; - - if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { - ret = platform_get_irq(pdev, 1); - if (ret <= 0) { - pdata->cq_cnt = 0; - dev_info(dev, "Unable to get Tx completion IRQ," - "using Rx IRQ instead\n"); - } else { - pdata->cq_cnt = XGENE_MAX_TXC_RINGS; - pdata->txc_irq = ret; - } - } pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { @@ -1278,6 +1340,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) else base_addr = pdata->base_addr; pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || @@ -1298,10 +1361,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) { + struct xgene_enet_cle *enet_cle = &pdata->cle; struct net_device *ndev = pdata->ndev; struct xgene_enet_desc_ring *buf_pool; u16 dst_ring_num; - int ret; + int i, ret; ret = pdata->port_ops->reset(pdata); if (ret) @@ -1314,16 +1378,36 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) } /* setup buffer pool */ - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_init_bufpool(buf_pool); - ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); - if (ret) { - xgene_enet_delete_desc_rings(pdata); - return ret; + for (i = 0; i < pdata->rxq_cnt; i++) { + buf_pool = pdata->rx_ring[i]->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } + } + + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + buf_pool = pdata->rx_ring[0]->buf_pool; + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + /* Initialize and Enable PreClassifier Tree */ + enet_cle->max_nodes = 512; + enet_cle->max_dbptrs = 1024; + enet_cle->parsers = 3; + enet_cle->active_parser = PARSER_ALL; + enet_cle->ptree.start_node = 0; + enet_cle->ptree.start_dbptr = 0; + enet_cle->jump_bytes = 8; + ret = pdata->cle_ops->cle_init(pdata); + if (ret) { + netdev_err(ndev, "Preclass Tree init error\n"); + return ret; + } + } else { + pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); } - dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); - pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); pdata->mac_ops->init(pdata); return ret; @@ -1336,16 +1420,26 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) pdata->mac_ops = &xgene_gmac_ops; pdata->port_ops = &xgene_gport_ops; pdata->rm = RM3; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 0; break; case PHY_INTERFACE_MODE_SGMII: pdata->mac_ops = &xgene_sgmac_ops; pdata->port_ops = &xgene_sgport_ops; pdata->rm = RM1; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 1; break; default: pdata->mac_ops = &xgene_xgmac_ops; pdata->port_ops = &xgene_xgport_ops; + pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; + pdata->rxq_cnt = XGENE_NUM_RX_RING; + pdata->txq_cnt = XGENE_NUM_TX_RING; + pdata->cq_cnt = XGENE_NUM_TXC_RING; break; } @@ -1399,12 +1493,16 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - 
netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_add(pdata->ndev, napi, xgene_enet_napi, + NAPI_POLL_WEIGHT); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); } @@ -1413,12 +1511,15 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - netif_napi_del(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_del(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_del(napi); } } @@ -1432,7 +1533,8 @@ static int xgene_enet_probe(struct platform_device *pdev) const struct of_device_id *of_id; int ret; - ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), + XGENE_NUM_RX_RING, XGENE_NUM_TX_RING); if (!ndev) return -ENOMEM; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 248dfc40a761..175d18890c7a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -36,6 +36,7 @@ #include <linux/if_vlan.h> #include <linux/phy.h> #include "xgene_enet_hw.h" +#include "xgene_enet_cle.h" #include "xgene_enet_ring2.h" #define XGENE_DRV_VERSION "v1.0" @@ -48,6 +49,11 @@ #define XGENE_ENET_MSS 1448 #define XGENE_MIN_ENET_FRAME_SIZE 60 +#define XGENE_MAX_ENET_IRQ 8 +#define XGENE_NUM_RX_RING 4 +#define XGENE_NUM_TX_RING 4 +#define XGENE_NUM_TXC_RING 4 + #define START_CPU_BUFNUM_0 0 #define START_ETH_BUFNUM_0 2 #define START_BP_BUFNUM_0 0x22 @@ -72,7 +78,6 @@ #define X2_START_RING_NUM_1 256 #define IRQ_ID_SIZE 16 -#define XGENE_MAX_TXC_RINGS 1 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -102,6 +107,7 @@ struct xgene_enet_desc_ring { void *irq_mbox_addr; u16 dst_ring_num; u8 nbufpool; + u8 index; struct sk_buff *(*rx_skb); struct sk_buff *(*cp_skb); dma_addr_t *frag_dma_addr; @@ -143,6 +149,11 @@ struct xgene_ring_ops { void (*clear)(struct xgene_enet_desc_ring *); void (*wr_cmd)(struct xgene_enet_desc_ring *, int); u32 (*len)(struct xgene_enet_desc_ring *); + void (*coalesce)(struct xgene_enet_desc_ring *); +}; + +struct xgene_cle_ops { + int (*cle_init)(struct xgene_enet_pdata *pdata); }; /* ethernet private data */ @@ -154,15 +165,16 @@ struct xgene_enet_pdata { struct clk *clk; struct platform_device *pdev; enum xgene_enet_id enet_id; - struct xgene_enet_desc_ring *tx_ring; - struct xgene_enet_desc_ring *rx_ring; - u16 tx_level; - u16 txc_level; + struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING]; + struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING]; + u16 tx_level[XGENE_NUM_TX_RING]; + u16 txc_level[XGENE_NUM_TX_RING]; char *dev_name; u32 rx_buff_cnt; u32 tx_qcnt_hi; - u32 rx_irq; - u32 txc_irq; + u32 irqs[XGENE_MAX_ENET_IRQ]; + u8 rxq_cnt; + u8 txq_cnt; u8 cq_cnt; void __iomem *eth_csr_addr; void __iomem *eth_ring_if_addr; @@ -174,10 +186,12 @@ struct xgene_enet_pdata { void __iomem *ring_cmd_addr; int phy_mode; enum xgene_enet_rm rm; + struct xgene_enet_cle cle; struct rtnl_link_stats64 
stats; const struct xgene_mac_ops *mac_ops; const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; + struct xgene_cle_ops *cle_ops; struct delayed_work link_work; u32 port_id; u8 cpu_bufnum; @@ -229,6 +243,13 @@ static inline struct device *ndev_to_dev(struct net_device *ndev) return ndev->dev.parent; } +static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + void xgene_enet_set_ethtool_ops(struct net_device *netdev); #endif /* __XGENE_ENET_MAIN_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 0b6896bb351e..2b76732add5d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c @@ -190,6 +190,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + struct xgene_ring_ops xgene_ring2_ops = { .num_ring_config = X2_NUM_RING_CONFIG, .num_ring_id_shift = 13, @@ -197,4 +208,5 @@ struct xgene_ring_ops xgene_ring2_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index dae1ac300a49..ca562bc034c3 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -14,36 +14,36 @@ #include <linux/clk.h> /* STATUS and ENABLE Register bit masks */ -#define TXINT_MASK (1<<0) /* Transmit interrupt */ -#define RXINT_MASK (1<<1) /* Receive interrupt */ -#define ERR_MASK (1<<2) /* Error interrupt */ -#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */ -#define MSER_MASK (1<<4) /* Missed packet counter error */ -#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */ -#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */ -#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */ -#define MDIO_MASK (1<<12) /* MDIO complete interrupt */ -#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */ +#define TXINT_MASK (1 << 0) /* Transmit interrupt */ +#define RXINT_MASK (1 << 1) /* Receive interrupt */ +#define ERR_MASK (1 << 2) /* Error interrupt */ +#define TXCH_MASK (1 << 3) /* Transmit chaining error interrupt */ +#define MSER_MASK (1 << 4) /* Missed packet counter error */ +#define RXCR_MASK (1 << 8) /* RXCRCERR counter rolled over */ +#define RXFR_MASK (1 << 9) /* RXFRAMEERR counter rolled over */ +#define RXFL_MASK (1 << 10) /* RXOFLOWERR counter rolled over */ +#define MDIO_MASK (1 << 12) /* MDIO complete interrupt */ +#define TXPL_MASK (1 << 31) /* Force polling of BD by EMAC */ /* CONTROL Register bit masks */ -#define EN_MASK (1<<0) /* VMAC enable */ -#define TXRN_MASK (1<<3) /* TX enable */ -#define RXRN_MASK (1<<4) /* RX enable */ -#define DSBC_MASK (1<<8) /* Disable receive broadcast */ -#define ENFL_MASK (1<<10) /* Enable Full-duplex */ -#define PROM_MASK (1<<11) /* Promiscuous mode */ +#define EN_MASK (1 << 0) /* VMAC enable */ +#define TXRN_MASK (1 << 3) /* TX enable */ +#define 
RXRN_MASK (1 << 4) /* RX enable */ +#define DSBC_MASK (1 << 8) /* Disable receive broadcast */ +#define ENFL_MASK (1 << 10) /* Enable Full-duplex */ +#define PROM_MASK (1 << 11) /* Promiscuous mode */ /* Buffer descriptor INFO bit masks */ -#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */ -#define FIRST_MASK (1<<16) /* First buffer in chain */ -#define LAST_MASK (1<<17) /* Last buffer in chain */ +#define OWN_MASK (1 << 31) /* 0-CPU or 1-EMAC owns buffer */ +#define FIRST_MASK (1 << 16) /* First buffer in chain */ +#define LAST_MASK (1 << 17) /* Last buffer in chain */ #define LEN_MASK 0x000007FF /* last 11 bits */ -#define CRLS (1<<21) -#define DEFR (1<<22) -#define DROP (1<<23) -#define RTRY (1<<24) -#define LTCL (1<<28) -#define UFLO (1<<29) +#define CRLS (1 << 21) +#define DEFR (1 << 22) +#define DROP (1 << 23) +#define RTRY (1 << 24) +#define LTCL (1 << 28) +#define UFLO (1 << 29) #define FOR_EMAC OWN_MASK #define FOR_CPU 0 @@ -66,7 +66,7 @@ enum { R_MDIO, }; -#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */ +#define TX_TIMEOUT (400 * HZ / 1000) /* Transmission timeout */ #define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */ @@ -102,6 +102,11 @@ struct buffer_state { DEFINE_DMA_UNMAP_LEN(len); }; +struct arc_emac_mdio_bus_data { + struct gpio_desc *reset_gpio; + int msec; +}; + /** * struct arc_emac_priv - Storage of EMAC's private information. * @dev: Pointer to the current device. @@ -131,6 +136,7 @@ struct arc_emac_priv { struct device *dev; struct phy_device *phy_dev; struct mii_bus *bus; + struct arc_emac_mdio_bus_data bus_data; void __iomem *regs; struct clk *clk; @@ -190,6 +196,7 @@ static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg) static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) { unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value | mask); } @@ -205,6 +212,7 @@ static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) { unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value & ~mask); } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abe1eabc0171..a3a9392a4954 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -26,7 +26,6 @@ #include "emac.h" - /** * arc_emac_tx_avail - Return the number of available slots in the tx ring. * @priv: Pointer to ARC EMAC private data structure. 
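The ARC EMAC hunks above are mostly cosmetic (the bit masks move to the spaced "(1 << n)" form) but they keep the small read-modify-write helpers arc_reg_or() and arc_reg_clr() that the rest of the driver builds on. Below is a minimal user-space model of that helper pattern, assuming a fake in-memory register in place of the MMIO accesses the real driver performs on priv->regs; the register and helper names are invented for the sketch.

#include <stdint.h>
#include <stdio.h>

/* Interrupt mask bits, in the same (1 << n) style as emac.h above. */
#define TXINT_MASK	(1 << 0)
#define RXINT_MASK	(1 << 1)
#define ERR_MASK	(1 << 2)

/* Fake backing store standing in for the memory-mapped ENABLE register. */
static uint32_t fake_enable_reg;

static uint32_t reg_get(void)		{ return fake_enable_reg; }
static void reg_set(uint32_t value)	{ fake_enable_reg = value; }

/* Same read-modify-write shape as arc_reg_or()/arc_reg_clr(). */
static void reg_or(uint32_t mask)	{ reg_set(reg_get() | mask); }
static void reg_clr(uint32_t mask)	{ reg_set(reg_get() & ~mask); }

int main(void)
{
	reg_or(TXINT_MASK | RXINT_MASK | ERR_MASK);	/* unmask TX, RX, error */
	reg_clr(TXINT_MASK);				/* later, mask TX again */

	printf("ENABLE = 0x%08x\n", (unsigned int)reg_get());
	return 0;
}

Confining the bit manipulation to two helpers keeps each caller down to naming the bits it cares about, which is what makes the one-line CONTROL/ENABLE changes in this driver easy to review.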
@@ -66,7 +65,7 @@ static void arc_emac_adjust_link(struct net_device *ndev) if (priv->duplex != phy_dev->duplex) { reg = arc_reg_get(priv, R_CTRL); - if (DUPLEX_FULL == phy_dev->duplex) + if (phy_dev->duplex == DUPLEX_FULL) reg |= ENFL_MASK; else reg &= ~ENFL_MASK; @@ -163,7 +162,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) struct sk_buff *skb = tx_buff->skb; unsigned int info = le32_to_cpu(txbd->info); - if ((info & FOR_EMAC) || !txbd->data) + if ((info & FOR_EMAC) || !txbd->data || !skb) break; if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { @@ -191,6 +190,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->data = 0; txbd->info = 0; + tx_buff->skb = NULL; *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; } @@ -446,6 +446,9 @@ static int arc_emac_open(struct net_device *ndev) *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; } + priv->txbd_curr = 0; + priv->txbd_dirty = 0; + /* Clean Tx BD's */ memset(priv->txbd, 0, TX_RING_SZ); @@ -462,9 +465,9 @@ static int arc_emac_open(struct net_device *ndev) /* Set CONTROL */ arc_reg_set(priv, R_CTRL, - (RX_BD_NUM << 24) | /* RX BD table length */ - (TX_BD_NUM << 16) | /* TX BD table length */ - TXRN_MASK | RXRN_MASK); + (RX_BD_NUM << 24) | /* RX BD table length */ + (TX_BD_NUM << 16) | /* TX BD table length */ + TXRN_MASK | RXRN_MASK); napi_enable(&priv->napi); @@ -514,6 +517,68 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) } /** + * arc_free_tx_queue - free skb from tx queue + * @ndev: Pointer to the network device. + * + * This function must be called while EMAC disable + */ +static void arc_free_tx_queue(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < TX_BD_NUM; i++) { + struct arc_emac_bd *txbd = &priv->txbd[i]; + struct buffer_state *tx_buff = &priv->tx_buff[i]; + + if (tx_buff->skb) { + dma_unmap_single(&ndev->dev, + dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), + DMA_TO_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(tx_buff->skb); + } + + txbd->info = 0; + txbd->data = 0; + tx_buff->skb = NULL; + } +} + +/** + * arc_free_rx_queue - free skb from rx queue + * @ndev: Pointer to the network device. + * + * This function must be called while EMAC disable + */ +static void arc_free_rx_queue(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < RX_BD_NUM; i++) { + struct arc_emac_bd *rxbd = &priv->rxbd[i]; + struct buffer_state *rx_buff = &priv->rx_buff[i]; + + if (rx_buff->skb) { + dma_unmap_single(&ndev->dev, + dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), + DMA_FROM_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(rx_buff->skb); + } + + rxbd->info = 0; + rxbd->data = 0; + rx_buff->skb = NULL; + } +} + +/** * arc_emac_stop - Close the network device. * @ndev: Pointer to the network device. 
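The hunk above teaches the Tx-clean path to stop at descriptors that have no skb attached, resets txbd_curr/txbd_dirty when the interface is opened, and adds arc_free_tx_queue()/arc_free_rx_queue() so arc_emac_stop() can return every outstanding buffer to the system. A rough user-space sketch of that "walk the whole BD ring and release whatever is still attached" shape follows; the bd/buffer_state types and plain malloc()/free() calls are stand-ins for the driver's descriptors, sk_buffs and DMA unmapping.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TX_BD_NUM	8
#define FOR_EMAC	(1u << 31)	/* ownership bit, as in emac.h */

/* Hypothetical stand-ins for the descriptor and its bookkeeping entry. */
struct bd		{ uint32_t info; uint32_t data; };
struct buffer_state	{ void *skb; };

static struct bd		txbd[TX_BD_NUM];
static struct buffer_state	tx_buff[TX_BD_NUM];

/* Same shape as arc_free_tx_queue(): visit every slot, release what is
 * still attached and clear both the descriptor and the bookkeeping. */
static void free_tx_queue(void)
{
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		if (tx_buff[i].skb) {
			free(tx_buff[i].skb);	/* dev_kfree_skb_irq() in the driver */
			tx_buff[i].skb = NULL;
		}
		txbd[i].info = 0;
		txbd[i].data = 0;
	}
}

int main(void)
{
	/* Pretend slot 3 still holds an in-flight frame when we stop. */
	tx_buff[3].skb = malloc(64);
	txbd[3].info = FOR_EMAC | 64;

	free_tx_queue();
	printf("slot 3 cleared: info=%u skb=%p\n",
	       (unsigned int)txbd[3].info, tx_buff[3].skb);
	return 0;
}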
* @@ -534,6 +599,10 @@ static int arc_emac_stop(struct net_device *ndev) /* Disable EMAC */ arc_reg_clr(priv, R_CTRL, EN_MASK); + /* Return the sk_buff to system */ + arc_free_tx_queue(ndev); + arc_free_rx_queue(ndev); + return 0; } @@ -610,7 +679,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); - priv->tx_buff[*txbd_curr].skb = skb; priv->txbd[*txbd_curr].data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ @@ -620,6 +688,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); + /* Make sure info word is set */ + wmb(); + + priv->tx_buff[*txbd_curr].skb = skb; + /* Increment index to point to the next BD */ *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; @@ -647,8 +720,8 @@ static void arc_emac_set_address_internal(struct net_device *ndev) struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int addr_low, addr_hi; - addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); - addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]); arc_reg_set(priv, R_ADDRL, addr_low); arc_reg_set(priv, R_ADDRH, addr_hi); @@ -704,7 +777,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) unsigned int id, clock_frequency, irq; int err; - /* Get PHY from device tree */ phy_node = of_parse_phandle(dev->of_node, "phy", 0); if (!phy_node) { @@ -726,7 +798,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) return -ENODEV; } - ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; ndev->watchdog_timeo = TX_TIMEOUT; @@ -737,9 +808,9 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) { + if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); - } + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); if (priv->clk) { @@ -860,10 +931,8 @@ int arc_emac_remove(struct net_device *ndev) unregister_netdev(ndev); netif_napi_del(&priv->napi); - if (!IS_ERR(priv->clk)) { + if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); - } - return 0; } diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index d5ee986936da..16419f550eff 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -7,6 +7,7 @@ #include <linux/delay.h> #include <linux/of_mdio.h> #include <linux/platform_device.h> +#include <linux/gpio/consumer.h> #include "emac.h" @@ -93,12 +94,31 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, phy_addr, reg_num, value); arc_reg_set(priv, R_MDIO, - 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value); + 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value); return arc_mdio_complete_wait(priv); } /** + * arc_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +int arc_mdio_reset(struct mii_bus *bus) +{ + struct arc_emac_priv *priv = bus->priv; + struct arc_emac_mdio_bus_data *data = &priv->bus_data; + + if (data->reset_gpio) { + gpiod_set_value_cansleep(data->reset_gpio, 1); + msleep(data->msec); + gpiod_set_value_cansleep(data->reset_gpio, 0); + } + + return 0; +} + +/** * arc_mdio_probe - MDIO probe function. * @priv: Pointer to ARC EMAC private data structure. 
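Two separate fixes sit in the hunks above: the transmit path now publishes the descriptor info word (behind a wmb()) before recording the skb pointer, so the clean-up path never sees a half-initialised slot, and a new arc_mdio_reset() callback pulses an optional PHY reset GPIO for a device-tree-specified number of milliseconds. The sketch below shows only the reset-pulse shape in user space; phy_reset_gpio_set() is a made-up stand-in for gpiod_set_value_cansleep(), and the 10 ms duration is just an example value.

#include <stdio.h>
#include <time.h>

/* Made-up stand-in for driving the descriptor obtained from
 * devm_gpiod_get_optional(priv->dev, "phy-reset", ...). */
static void phy_reset_gpio_set(int value)
{
	printf("phy reset line -> %d\n", value);
}

/* Same shape as arc_mdio_reset(): assert the reset line, hold it for a
 * board-specific time, then release it before touching the MII bus. */
static void mdio_bus_reset(unsigned int msec)
{
	struct timespec ts = {
		.tv_sec = msec / 1000,
		.tv_nsec = (long)(msec % 1000) * 1000000L,
	};

	phy_reset_gpio_set(1);
	nanosleep(&ts, NULL);	/* msleep(data->msec) in the driver */
	phy_reset_gpio_set(0);
}

int main(void)
{
	mdio_bus_reset(10);	/* e.g. a 10 ms "phy-reset-duration" */
	return 0;
}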
* @@ -109,6 +129,8 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, */ int arc_mdio_probe(struct arc_emac_priv *priv) { + struct arc_emac_mdio_bus_data *data = &priv->bus_data; + struct device_node *np = priv->dev->of_node; struct mii_bus *bus; int error; @@ -122,6 +144,21 @@ int arc_mdio_probe(struct arc_emac_priv *priv) bus->name = "Synopsys MII Bus", bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; + bus->reset = &arc_mdio_reset; + + /* optional reset-related properties */ + data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset", + GPIOD_OUT_LOW); + if (IS_ERR(data->reset_gpio)) { + error = PTR_ERR(data->reset_gpio); + dev_err(priv->dev, "Failed to request gpio: %d\n", error); + return error; + } + + of_property_read_u32(np, "phy-reset-duration", &data->msec); + /* A sane reset duration should not be longer than 1s */ + if (data->msec > 1000) + data->msec = 1; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 85e821ccfcd2..e278e3d96ee0 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -50,7 +50,7 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) u32 data; int err = 0; - switch(speed) { + switch (speed) { case 10: data = (1 << (speed_offset + 16)) | (0 << speed_offset); break; @@ -83,9 +83,18 @@ static const struct emac_rockchip_soc_data emac_rk3188_emac_data = { }; static const struct of_device_id emac_rockchip_dt_ids[] = { - { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data }, - { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data }, - { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data }, + { + .compatible = "rockchip,rk3036-emac", + .data = &emac_rk3036_emac_data, + }, + { + .compatible = "rockchip,rk3066-emac", + .data = &emac_rk3066_emac_data, + }, + { + .compatible = "rockchip,rk3188-emac", + .data = &emac_rk3188_emac_data, + }, { /* Sentinel */ } }; @@ -123,9 +132,11 @@ static int emac_rockchip_probe(struct platform_device *pdev) goto out_netdev; } - priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); + priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); if (IS_ERR(priv->grf)) { - dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf)); + dev_err(dev, "failed to retrieve global register file (%ld)\n", + PTR_ERR(priv->grf)); err = PTR_ERR(priv->grf); goto out_netdev; } @@ -135,14 +146,16 @@ static int emac_rockchip_probe(struct platform_device *pdev) priv->emac.clk = devm_clk_get(dev, "hclk"); if (IS_ERR(priv->emac.clk)) { - dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk)); + dev_err(dev, "failed to retrieve host clock (%ld)\n", + PTR_ERR(priv->emac.clk)); err = PTR_ERR(priv->emac.clk); goto out_netdev; } priv->refclk = devm_clk_get(dev, "macref"); if (IS_ERR(priv->refclk)) { - dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk)); + dev_err(dev, "failed to retrieve reference clock (%ld)\n", + PTR_ERR(priv->refclk)); err = PTR_ERR(priv->refclk); goto out_netdev; } @@ -179,19 +192,22 @@ static int emac_rockchip_probe(struct platform_device *pdev) err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); if (err) { - dev_err(dev, "unable to apply initial settings to grf (%d)\n", err); + dev_err(dev, "unable to apply initial settings to grf (%d)\n", + err); goto 
out_regulator_disable; } /* RMII interface needs always a rate of 50MHz */ err = clk_set_rate(priv->refclk, 50000000); if (err) - dev_err(dev, "failed to change reference clock rate (%d)\n", err); + dev_err(dev, + "failed to change reference clock rate (%d)\n", err); if (priv->soc_data->need_div_macclk) { priv->macclk = devm_clk_get(dev, "macclk"); if (IS_ERR(priv->macclk)) { - dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk)); + dev_err(dev, "failed to retrieve mac clock (%ld)\n", + PTR_ERR(priv->macclk)); err = PTR_ERR(priv->macclk); goto out_regulator_disable; } @@ -205,7 +221,8 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* RMII TX/RX needs always a rate of 25MHz */ err = clk_set_rate(priv->macclk, 25000000); if (err) - dev_err(dev, "failed to change mac clock rate (%d)\n", err); + dev_err(dev, + "failed to change mac clock rate (%d)\n", err); } err = arc_emac_probe(ndev, interface); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 8b5988e210d5..d0084d4d1a9b 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -65,10 +65,6 @@ static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); static int atl1c_configure(struct atl1c_adapter *adapter); static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); -static const u16 atl1c_pay_load_size[] = { - 128, 256, 512, 1024, 2048, 4096, -}; - static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index f71ab2647a3b..08a23e6b60e9 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev) goto err_disable_clk; } - priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (of_phy_is_fixed_link(pdev->dev.of_node)) { + ret = of_phy_register_fixed_link(pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "bad fixed-link spec\n"); + goto err_free_bus; + } + priv->phy_node = of_node_get(pdev->dev.of_node); + } + + if (!priv->phy_node) + priv->phy_node = of_parse_phandle(pdev->dev.of_node, + "phy-handle", 0); + if (!priv->phy_node) { dev_err(&pdev->dev, "no PHY specified\n"); ret = -ENODEV; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 19f7cd02e085..18042c2460bd 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -149,6 +149,16 @@ config BNX2X_VXLAN Say Y here if you want to enable hardware offload support for Virtual eXtensible Local Area Network (VXLAN) in the driver. +config BNX2X_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) support" + depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m) + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to enable hardware offload support for + Generic Network Virtualization Encapsulation (GENEVE) in the driver. 
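The new BNX2X_GENEVE entry above mirrors the existing VXLAN one, including the "!(BNX2X=y && GENEVE=m)" guard that stops a built-in driver from linking against a modular GENEVE core. Further down in this diff the driver also replaces its single vxlan_dst_port with a small refcounted table (struct bnx2x_udp_tunnel, indexed by BNX2X_UDP_PORT_VXLAN/BNX2X_UDP_PORT_GENEVE). The user-space sketch below shows only the general add/remove bookkeeping such a table implies; the function names and return convention are invented for illustration and are not the driver's actual callbacks, which presumably go through the BNX2X_SP_RTNL_CHANGE_UDP_PORT flag added in the bnx2x.h hunk below.

#include <stdint.h>
#include <stdio.h>

enum udp_port_type { UDP_PORT_VXLAN, UDP_PORT_GENEVE, UDP_PORT_MAX };

/* Mirrors the refcounted bookkeeping of struct bnx2x_udp_tunnel below. */
struct udp_tunnel {
	uint16_t dst_port;
	uint8_t count;
};

static struct udp_tunnel ports[UDP_PORT_MAX];

/* Returns 1 when the hardware port actually needs (re)programming. */
static int udp_port_add(enum udp_port_type type, uint16_t port)
{
	if (ports[type].count && ports[type].dst_port == port) {
		ports[type].count++;	/* same port added again: just refcount */
		return 0;
	}
	if (ports[type].count)
		return 0;		/* a different port is already offloaded */

	ports[type].dst_port = port;
	ports[type].count = 1;
	return 1;
}

/* Returns 1 when the last user is gone and the port should be cleared. */
static int udp_port_del(enum udp_port_type type, uint16_t port)
{
	if (!ports[type].count || ports[type].dst_port != port)
		return 0;

	return --ports[type].count == 0;
}

int main(void)
{
	printf("program 6081:  %d\n", udp_port_add(UDP_PORT_GENEVE, 6081));
	printf("program again: %d\n", udp_port_add(UDP_PORT_GENEVE, 6081));
	printf("first delete:  %d\n", udp_port_del(UDP_PORT_GENEVE, 6081));
	printf("second delete: %d\n", udp_port_del(UDP_PORT_GENEVE, 6081));
	return 0;
}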
+ config BGMAC tristate "BCMA bus GBit core support" depends on BCMA && BCMA_HOST_SOC diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 06f6cffdfaf5..99b30a952b38 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -26,6 +26,18 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = { }; MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); +static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) +{ + switch (bgmac->core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM47094: + case BCMA_CHIP_ID_BCM53018: + return true; + default: + return false; + } +} + static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -987,11 +999,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { struct bcma_device *core = bgmac->core; - struct bcma_chipinfo *ci = &core->bus->chipinfo; u8 imode; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { bcma_awrite32(core, BCMA_IOCTL, bcma_aread32(core, BCMA_IOCTL) | 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN); @@ -1043,8 +1053,9 @@ static void bgmac_chip_reset(struct bgmac *bgmac) (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) iost &= ~BGMAC_BCMA_IOST_ATTACHED; - /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */ - if (ci->id != BCMA_CHIP_ID_BCM4707) { + /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ + if (ci->id != BCMA_CHIP_ID_BCM4707 && + ci->id != BCMA_CHIP_ID_BCM47094) { flags = 0; if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; @@ -1055,9 +1066,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac) } /* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && - ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, @@ -1193,8 +1202,7 @@ static void bgmac_enable(struct bgmac *bgmac) break; } - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (!bgmac_is_bcm4707_family(bgmac)) { rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / @@ -1472,14 +1480,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac) static int bgmac_mii_register(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; int err = 0; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) + if (bgmac_is_bcm4707_family(bgmac)) return bgmac_fixed_phy_register(bgmac); mii_bus = mdiobus_alloc(); @@ -1539,7 +1545,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { - struct bcma_chipinfo *ci = &core->bus->chipinfo; struct net_device *net_dev; struct bgmac *bgmac; struct ssb_sprom *sprom = &core->bus->sprom; @@ -1620,8 +1625,7 @@ static int bgmac_probe(struct bcma_device *core) bgmac_chip_reset(bgmac); /* For Northstar, we have to take all GMAC core out of reset */ - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { struct 
bcma_device *ns_core; int ns_gmac; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cae0956186ce..7dd7490fdac1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1277,8 +1277,7 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, - BNX2X_SP_RTNL_ADD_VXLAN_PORT, - BNX2X_SP_RTNL_DEL_VXLAN_PORT, + BNX2X_SP_RTNL_CHANGE_UDP_PORT, }; enum bnx2x_iov_flag { @@ -1327,6 +1326,17 @@ struct bnx2x_vlan_entry { bool hw; }; +enum bnx2x_udp_port_type { + BNX2X_UDP_PORT_VXLAN, + BNX2X_UDP_PORT_GENEVE, + BNX2X_UDP_PORT_MAX, +}; + +struct bnx2x_udp_tunnel { + u16 dst_port; + u8 count; +}; + struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1830,9 +1840,10 @@ struct bnx2x { struct list_head vlan_reg; u16 vlan_cnt; u16 vlan_credit; - u16 vxlan_dst_port; - u8 vxlan_dst_port_count; bool accept_any_vlan; + + /* Vxlan/Geneve related information */ + struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; }; /* Tx queues may be less or equal to Rx queues */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9695a4c4a434..0a9108cd4c45 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3042,8 +3042,12 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_save_statistics(bp); } - /* wait till consumers catch up with producers in all queues */ - bnx2x_drain_tx_queues(bp); + /* wait till consumers catch up with producers in all queues. + * If we're recovering, FW can't write to host so no reason + * to wait for the queues to complete all Tx. 
+ */ + if (unload_mode != UNLOAD_RECOVERY) + bnx2x_drain_tx_queues(bp); /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations @@ -4272,6 +4276,14 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + return bnx2x_setup_tc(dev, tc->tc); +} + /* called with rtnl_lock */ int bnx2x_change_mac_addr(struct net_device *dev, void *p) { @@ -5086,4 +5098,3 @@ void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, flag); schedule_delayed_work(&bp->sp_rtnl_task, 0); } -EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 4cbb03f87b5a..0e68fadecfdb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,6 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); @@ -923,6 +925,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_start_params *start_params = &func_params.params.start; + u16 port; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -959,8 +962,14 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - - start_params->vxlan_dst_port = bp->vxlan_dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port; + start_params->vxlan_dst_port = port; + } + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port; + start_params->geneve_dst_port = port; + } start_params->inner_rss = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 7ccf6684e0a3..2c6ba046d2a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -195,6 +195,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, u32 error) { u8 index; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u8 iscsi_pri_found = 0, fcoe_pri_found = 0; if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); @@ -210,29 +211,57 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, bp->dcbx_port_params.app.enabled = true; + /* Use 0 as the default application priority for all. 
*/ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) ttp[index] = 0; - if (app->default_pri < MAX_PFC_PRIORITIES) - ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; - for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { struct dcbx_app_priority_entry *entry = app->app_pri_tbl; + enum traffic_type type = MAX_TRAFFIC_TYPE; if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_ETH_TYPE) && - ETH_TYPE_FCOE == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_FCOE); + DCBX_APP_SF_DEFAULT) && + GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE)) { + type = LLFC_TRAFFIC_TYPE_NW; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_ISCSI; + iscsi_pri_found = 1; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_FCOE; + fcoe_pri_found = 1; + } - if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_PORT) && - TCP_PORT_ISCSI == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_ISCSI); + if (type == MAX_TRAFFIC_TYPE) + continue; + + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + type); + } + + /* If we have received a non-zero default application + * priority, then use that for applications which are + * not configured with any priority. + */ + if (ttp[LLFC_TRAFFIC_TYPE_NW] != 0) { + if (!iscsi_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_ISCSI] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "ISCSI is using default priority.\n"); + } + if (!fcoe_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_FCOE] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "FCoE is using default priority.\n"); + } } } else { DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 820b7e04bb5f..85a7800bfc12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -981,6 +981,11 @@ static void bnx2x_get_regs(struct net_device *dev, memcpy(p, &dump_hdr, sizeof(struct dump_header)); p += dump_hdr.header_size + 1; + /* This isn't really an error, but since attention handling is going + * to print the GRC timeouts using this macro, we use the same. + */ + BNX2X_ERR("Generating register dump. 
Might trigger harmless GRC timeouts\n"); + /* Actually read the registers */ __bnx2x_get_regs(bp, p); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 27aa0802d87d..f8b810313094 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1824,17 +1824,22 @@ struct dcbx_app_priority_entry { u8 pri_bitmap; u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 #elif defined(__LITTLE_ENDIAN) u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_ENTRY_VALID 0x01 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 u8 pri_bitmap; u16 app_id; #endif @@ -4896,9 +4901,9 @@ struct c2s_pri_trans_table_entry { * cfc delete event data */ struct cfc_del_event_data { - u32 cid; - u32 reserved0; - u32 reserved1; + __le32 cid; + __le32 reserved0; + __le32 reserved1; }; @@ -5114,15 +5119,9 @@ struct vf_pf_channel_zone_trigger { * zone that triggers the in-bound interrupt */ struct trigger_vf_zone { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - struct vf_pf_channel_zone_trigger vf_pf_channel; -#elif defined(__LITTLE_ENDIAN) struct vf_pf_channel_zone_trigger vf_pf_channel; u8 reserved0; u16 reserved1; -#endif u32 reserved2; }; @@ -5207,9 +5206,9 @@ struct e2_integ_data { * set mac event data */ struct eth_event_data { - u32 echo; - u32 reserved0; - u32 reserved1; + __le32 echo; + __le32 reserved0; + __le32 reserved1; }; @@ -5219,9 +5218,9 @@ struct eth_event_data { struct vf_pf_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 msg_addr_lo; - u32 msg_addr_hi; + __le16 reserved1; + __le32 msg_addr_lo; + __le32 msg_addr_hi; }; /* @@ -5230,9 +5229,9 @@ struct vf_pf_event_data { struct vf_flr_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* @@ -5241,9 +5240,9 @@ struct vf_flr_event_data { struct malicious_vf_event_data { u8 vf_id; u8 err_id; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index d946bba43726..1fb80100e5e7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) shift -= 4; digit = ((num & mask) >> shift); if (digit == 0 && remove_leading_zeros) { - mask = mask >> 4; - continue; - } else if (digit < 0xa) - *str_ptr = digit + '0'; - else - *str_ptr = digit - 0xa + 'a'; - remove_leading_zeros = 0; - str_ptr++; - (*len)--; + *str_ptr = '0'; + } else { + if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + } mask = mask >> 4; if (shift == 4*4) { + if (remove_leading_zeros) { + str_ptr++; + (*len)--; + } *str_ptr = '.'; str_ptr++; (*len)--; remove_leading_zeros = 1; } } + if (remove_leading_zeros) + (*len)--; return 0; } 
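The reworked bnx2x_format_ver() above, and the three-group bnx2x_3_seq_format_ver() variant added just below, both walk a 32-bit value one nibble at a time, suppressing leading zeros inside each dot-separated group. A minimal standalone sketch of that formatting idea follows; it is not driver code, and the function name, buffer handling and the two-group split are illustrative assumptions rather than anything taken from the patch.

/* Standalone sketch of the nibble-walk formatting used by the helpers
 * above: emit hex digits most-significant nibble first, drop leading
 * zeros within each group, and place a '.' at a fixed nibble boundary.
 */
#include <stdio.h>

static void format_ver_sketch(unsigned int num, char *buf)
{
	int shift;
	int printed = 0;	/* non-zero digit already seen in this group */
	char *p = buf;

	for (shift = 28; shift >= 0; shift -= 4) {
		unsigned int digit = (num >> shift) & 0xf;

		if (digit || printed || shift == 0) {
			*p++ = digit < 0xa ? '0' + digit : 'a' + digit - 0xa;
			printed = 1;
		}
		if (shift == 16) {		/* group boundary, as in the driver helper */
			if (!printed)
				*p++ = '0';	/* keep an explicit 0 for an all-zero group */
			*p++ = '.';
			printed = 0;
		}
	}
	*p = '\0';
}

int main(void)
{
	char buf[16];

	format_ver_sketch(0x0102000a, buf);	/* -> "102.a" */
	printf("%s\n", buf);
	return 0;
}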
+static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len) +{ + u8 *str_ptr = str; + u32 mask = 0x00f00000; + u8 shift = 8*3; + u8 digit; + u8 remove_leading_zeros = 1; + + if (*len < 10) { + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return -EINVAL; + } + + while (shift > 0) { + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + *str_ptr = '0'; + } else { + if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + } + mask = mask >> 4; + if ((shift == 4*4) || (shift == 4*2)) { + if (remove_leading_zeros) { + str_ptr++; + (*len)--; + } + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + if (remove_leading_zeros) + (*len)--; + return 0; +} static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) { @@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, if (bnx2x_is_8483x_8485x(phy)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); - bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, - phy->ver_addr); + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + fw_ver1 &= 0xfff; + bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr); } else { /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ @@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, offset, i; + u16 val, led3_blink_rate, offset, i; static struct bnx2x_reg_set reg_set[] = { {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, - {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} }; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* Set LED5 source */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x90); + led3_blink_rate = 0x000f; + } else { + led3_blink_rate = 0x0000; + } + /* Set LED3 BLINK */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_BLINK, + led3_blink_rate); + /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, val &= 0xFE00; val |= 0x0092; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + val |= 2 << 12; /* LED5 ON based on source */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); @@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; - /* stretch_en for LED3*/ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) + val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT | + MDIO_PMA_REG_84823_LED3_STRETCH_EN; + else + val = MDIO_PMA_REG_84823_LED3_STRETCH_EN; + + /* stretch_en for LEDs */ bnx2x_cl45_read_or_write(bp, phy, - MDIO_PMA_DEVAD, offset, - MDIO_PMA_REG_84823_LED3_STRETCH_EN); + MDIO_PMA_DEVAD, + offset, + val); } static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, @@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; switch (action) { case PHY_INIT: - if (!bnx2x_is_8483x_8485x(phy)) { + if 
(bnx2x_is_8483x_8485x(phy)) { /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } @@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy, static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, - u16 cmd_args[], int argc) + u16 cmd_args[], int argc, int process) { int idx; u16 val; struct bnx2x *bp = params->bp; - /* Write CMD_OPEN_OVERRIDE to STATUS reg */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_OPEN_OVERRIDE); + int rc = 0; + + if (process == PHY84833_MB_PROCESS2) { + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + } + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_848xx_CMD_HDLR_STATUS, &val); @@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, } if (idx >= PHY848xx_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR + * clear the status to CMD_CLEAR_COMPLETE + */ + if (val == PHY84833_STATUS_CMD_COMPLETE_PASS || + val == PHY84833_STATUS_CMD_COMPLETE_ERROR) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } return -EINVAL; } - - /* Prepare argument(s) and issue command */ - for (idx = 0; idx < argc; idx++) { - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_DATA1 + idx, - cmd_args[idx]); + if (process == PHY84833_MB_PROCESS1 || + process == PHY84833_MB_PROCESS2) { + /* Prepare argument(s) */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { @@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, if ((idx >= PHY848xx_CMDHDLR_WAIT) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { DP(NETIF_MSG_LINK, "FW cmd failed.\n"); - return -EINVAL; + rc = -EINVAL; } - /* Gather returning data */ - for (idx = 0; idx < argc; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_DATA1 + idx, - &cmd_args[idx]); + if (process == PHY84833_MB_PROCESS3 && rc == 0) { + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } } - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_848xx_CMD_HDLR_STATUS, - PHY84833_STATUS_CMD_CLEAR_COMPLETE); - return 0; + if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR || + val == PHY84833_STATUS_CMD_COMPLETE_PASS) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } + return rc; } static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, - u16 cmd_args[], int argc) + u16 cmd_args[], int argc, + int process) { struct bnx2x *bp = params->bp; @@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, argc); } else { return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, - argc); + argc, process); } } @@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy, status = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_PAIR_SWAP, data, - PHY848xx_CMDHDLR_MAX_ARGS); + 2, 
PHY84833_MB_PROCESS2); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); /* Prevent Phy from working in EEE and advertising it */ - rc = bnx2x_848xx_cmd_hdlr(phy, params, - PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); if (rc) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; @@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - rc = bnx2x_848xx_cmd_hdlr(phy, params, - PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); if (rc) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; @@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, cmd_args[3] = PHY84833_CONSTANT_LATENCY; rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, cmd_args, - PHY848xx_CMDHDLR_MAX_ARGS); + 4, PHY84833_MB_PROCESS1); if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } @@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + /* Additional settings for jumbo packets in 1000BASE-T mode */ + /* Allow rx extended length */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_AUX_CTRL, &val); + val |= 0x4000; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_AUX_CTRL, val); + /* TX FIFO Elasticity LSB */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val); + val |= 0x1; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val); + /* TX FIFO Elasticity MSB */ + /* Enable expansion register 0x46 (Pattern Generator status) */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46); + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val); + val |= 0x4000; + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val); + } + if (bnx2x_is_8483x_8485x(phy)) { /* Bring PHY out of super isolate mode as the final step. 
*/ bnx2x_cl45_read_and_write(bp, phy, @@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, return link_up; } +static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len) +{ + int status = 0; + u32 num; + + num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) | + ((raw_ver & 0xF000) >> 12); + status = bnx2x_3_seq_format_ver(num, str, len); + return status; +} + static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) { int status = 0; @@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 0x0); } else { + /* LED 1 OFF */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0); + + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* LED 2 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + /* LED 3 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + } } break; case LED_MODE_FRONT_PANEL_OFF: @@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_REG_8481_SIGNAL_MASK, 0x0); } + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* LED 2 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + /* LED 3 OFF */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + } } break; case LED_MODE_ON: @@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, params->port*4, NIG_MASK_MI_INT); } + } + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + /* Tell LED3 to constant on */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (2<<6); /* A83B[8:6]= 2 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); + } else { bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_SIGNAL_MASK, @@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_REG_8481_LINK_SIGNAL, val); if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x18); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x06); + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { /* Restore LED4 source to external link, * and re-enable interrupts. 
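The bnx2x_8485x_format_ver() helper added above repacks the BCM84858 raw SPIROM version word before handing it to bnx2x_3_seq_format_ver(), so that each dot-separated group lands in its own byte. A standalone sketch of that repacking is shown below; the field names (major/minor/build) and the example input are inferred from the bit masks in the patch and should be treated as assumptions, not as documented hardware fields.

/* Standalone illustration (not driver code) of the 84858 version repacking. */
#include <stdio.h>

static unsigned int repack_84858_ver(unsigned int raw_ver)
{
	unsigned int major = (raw_ver & 0xF80) >> 7;	/* bits 11:7  (assumed name) */
	unsigned int minor =  raw_ver & 0x7F;		/* bits 6:0   (assumed name) */
	unsigned int build = (raw_ver & 0xF000) >> 12;	/* bits 15:12 (assumed name) */

	/* Same packing as the patch: one byte per dot-separated group
	 * consumed by the three-group formatter, i.e. "major.minor.build".
	 */
	return (major << 16) | (minor << 8) | build;
}

int main(void)
{
	printf("0x%06x\n", repack_84858_ver(0xA28B));	/* arbitrary example input */
	return 0;
}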
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = { .read_status = (read_status_t)bnx2x_848xx_read_status, .link_reset = (link_reset_t)bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver, .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func @@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars) if (CHIP_IS_E3(bp)) { struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); - if ((phy->supported & SUPPORTED_20000baseKR2_Full) && - (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == SPEED_20000)) bnx2x_check_kr2_wa(params, vars, phy); bnx2x_check_over_curr(params, vars); if (vars->rx_tx_asic_rst) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6c4e3a69976f..d465bd721146 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,7 +59,9 @@ #include <linux/semaphore.h> #include <linux/stringify.h> #include <linux/vmalloc.h> - +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) +#include <net/geneve.h> +#endif #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" @@ -5280,14 +5282,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; - u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); + u32 cid = echo & BNX2X_SWCID_MASK; struct bnx2x_vlan_mac_obj *vlan_mac_obj; /* Always push next commands out, don't wait here */ __set_bit(RAMROD_CONT, &ramrod_flags); - switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) - >> BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) @@ -5308,8 +5310,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, bnx2x_handle_mcast_eqe(bp); return; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } @@ -5478,9 +5479,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) goto next_spqe; } - /* elem CID originates from FW; actually LE */ - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ @@ -5503,6 +5501,10 @@ static void bnx2x_eq_int(struct bnx2x *bp) * we may want to verify here that the bp state is * HALTING */ + + /* elem CID originates from FW; actually LE */ + cid = SW_CID(elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_SP, "got delete ramrod for MULTI[%d]\n", cid); @@ -5596,10 +5598,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPENING_WAIT4_PORT): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - cid = elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", - cid); + SW_CID(elem->message.data.eth_event.echo)); rss_raw->clear_pending(rss_raw); break; @@ -5684,7 +5684,7 @@ static void bnx2x_sp_task(struct work_struct *work) if (status & BNX2X_DEF_SB_IDX) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - if (FCOE_INIT(bp) && + if (FCOE_INIT(bp) && (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* Prevent local bottom-halves from running as * we are going to change the local NAPI list. @@ -10076,11 +10076,13 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } -#ifdef CONFIG_BNX2X_VXLAN -static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) +static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params *switch_update_params; struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_udp_tunnel *udp_tunnel; + u16 vxlan_port = 0, geneve_port = 0; int rc; switch_update_params = &func_params.params.switch_update; @@ -10095,69 +10097,125 @@ static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) /* Function parameters */ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes); - switch_update_params->vxlan_dst_port = port; + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; + geneve_port = udp_tunnel->dst_port; + switch_update_params->geneve_dst_port = geneve_port; + } + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; + vxlan_port = udp_tunnel->dst_port; + switch_update_params->vxlan_dst_port = vxlan_port; + } + + /* Re-enable inner-rss for the offloaded UDP tunnels */ + __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, + &switch_update_params->changes); + rc = bnx2x_func_state_change(bp, &func_params); if (rc) - BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n", - port, rc); + BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n", + vxlan_port, geneve_port, rc); + else + DP(BNX2X_MSG_SP, + "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n", + vxlan_port, geneve_port); + return rc; } -static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) +static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) { - if (!netif_running(bp->dev)) + struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if (!netif_running(bp->dev) || !IS_PF(bp)) return; - if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { - bp->vxlan_dst_port_count++; + if (udp_port->count && udp_port->dst_port == port) { + udp_port->count++; return; } - if (bp->vxlan_dst_port_count || !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); + if (udp_port->count) { + DP(BNX2X_MSG_SP, + "UDP tunnel [%d] - destination port limit reached\n", + type); return; } - bp->vxlan_dst_port = port; - bp->vxlan_dst_port_count = 1; - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); + udp_port->dst_port = port; + udp_port->count = 1; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); } +static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) +{ + struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if (!IS_PF(bp)) + return; + + if (!udp_port->count || udp_port->dst_port != port) { + DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n", + type); + return; + } + + /* Remove reference, and make certain it's no longer in use */ + 
udp_port->count--; + if (udp_port->count) + return; + udp_port->dst_port = 0; + + if (netif_running(bp->dev)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); + else + DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", + type, port); +} +#endif + +#ifdef CONFIG_BNX2X_VXLAN static void bnx2x_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_add_vxlan_port(bp, t_port); + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); } -static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) +static void bnx2x_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { - if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || - !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); - return; - } - bp->vxlan_dst_port_count--; - if (bp->vxlan_dst_port_count) - return; + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); - if (netif_running(bp->dev)) { - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); - } else { - bp->vxlan_dst_port = 0; - netdev_info(bp->dev, "Deleted vxlan dest port %d", port); - } + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); } +#endif -static void bnx2x_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) +static void bnx2x_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); +} + +static void bnx2x_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_del_vxlan_port(bp, t_port); + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); } #endif @@ -10169,9 +10227,6 @@ static int bnx2x_close(struct net_device *dev); static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); -#ifdef CONFIG_BNX2X_VXLAN - u16 port; -#endif rtnl_lock(); @@ -10270,23 +10325,27 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); -#ifdef CONFIG_BNX2X_VXLAN - port = bp->vxlan_dst_port; - if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT, - &bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, port)) - netdev_info(bp->dev, "Added vxlan dest port %d", port); - else - bp->vxlan_dst_port = 0; - } - - if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT, +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) + if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, 0)) { - netdev_info(bp->dev, - "Deleted vxlan dest port %d", port); - bp->vxlan_dst_port = 0; - vxlan_get_rx_port(bp->dev); + if (bnx2x_udp_port_update(bp)) { + /* On error, forget configuration */ + memset(bp->udp_tunnel_ports, 0, + sizeof(struct bnx2x_udp_tunnel) * + BNX2X_UDP_PORT_MAX); + } else { + /* Since we don't store additional port information, + * if no port is configured for any feature ask for + * information about currently configured ports. 
+ */ +#ifdef CONFIG_BNX2X_VXLAN + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) + vxlan_get_rx_port(bp->dev); +#endif +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) + geneve_get_rx_port(bp->dev); +#endif } } #endif @@ -12368,8 +12427,10 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_HAS(bp, dcbx_en) && SHMEM2_RD(bp, dcbx_lldp_params_offset) && - SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { + SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) { bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_dcbx_init_params(bp); } else { @@ -12494,6 +12555,10 @@ static int bnx2x_open(struct net_device *dev) if (IS_PF(bp)) vxlan_get_rx_port(dev); #endif +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) + if (IS_PF(bp)) + geneve_get_rx_port(dev); +#endif return 0; } @@ -12994,7 +13059,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif - .ndo_setup_tc = bnx2x_setup_tc, + .ndo_setup_tc = __bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, @@ -13011,6 +13076,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_add_vxlan_port = bnx2x_add_vxlan_port, .ndo_del_vxlan_port = bnx2x_del_vxlan_port, #endif +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) + .ndo_add_geneve_port = bnx2x_add_geneve_port, + .ndo_del_geneve_port = bnx2x_del_geneve_port, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) @@ -14816,6 +14885,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev, } offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); + if (!offset) { + DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); + goto out; + } DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); /* Read the table contents from nvram */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 4dead49bd5cb..a43dea259b12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 #define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 +/* BCM84858 only */ +#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000 /* BCM84833 only */ #define MDIO_84833_TOP_CFG_FW_REV 0x400f @@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/ #define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox Process */ +#define PHY84833_MB_PROCESS1 1 +#define PHY84833_MB_PROCESS2 2 +#define PHY84833_MB_PROCESS3 3 /* Mailbox status set used by 84858 only */ #define PHY84858_STATUS_CMD_RECEIVED 0x0001 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9d027348cd09..632daff117d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); /* Always push next commands out, don't wait here */ set_bit(RAMROD_CONT, &ramrod_flags); - 
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, &ramrod_flags); @@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, &ramrod_flags); break; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } if (rc < 0) @@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) switch (opcode) { case EVENT_RING_OPCODE_CFC_DEL: - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); + cid = SW_CID(elem->message.data.cfc_del_event.cid); DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: case EVENT_RING_OPCODE_RSS_UPDATE_RULES: - cid = (elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK); + cid = SW_CID(elem->message.data.eth_event.echo); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_VF_FLR: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 1374e5394a79..bfae300cf25f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp, /* Update VFDB with current message and schedule its handling */ mutex_lock(&BP_VFDB(bp)->event_mutex); - BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; - BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = + le32_to_cpu(vfpf_event->msg_addr_hi); + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = + le32_to_cpu(vfpf_event->msg_addr_lo); BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); mutex_unlock(&BP_VFDB(bp)->event_mutex); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5dc89e527e7d..aabbd51db981 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_RX_DMA_OFFSET NET_SKB_PAD #define BNXT_RX_COPY_THRESH 256 -#define BNXT_TX_PUSH_THRESH 92 +#define BNXT_TX_PUSH_THRESH 164 enum board_idx { BCM57301, @@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { - struct tx_push_bd *push = txr->tx_push; - struct tx_bd *tx_push = &push->txbd1; - struct tx_bd_ext *tx_push1 = &push->txbd2; - void *pdata = tx_push1 + 1; - int j; + struct tx_push_buffer *tx_push_buf = txr->tx_push; + struct tx_push_bd *tx_push = &tx_push_buf->push_bd; + struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void *pdata = tx_push_buf->data; + u64 *end; + int j, push_len; /* Set COAL_NOW to be ready quickly for the next push */ tx_push->tx_bd_len_flags_type = @@ -247,6 +248,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + skb_copy_from_linear_data(skb, pdata, len); pdata += len; for (j = 0; j < last_frag; j++) { @@ -261,22 +266,29 @@ static netdev_tx_t 
bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) pdata += skb_frag_size(frag); } - memcpy(txbd, tx_push, sizeof(*txbd)); + txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; + txbd->tx_bd_haddr = txr->data_mapping; prod = NEXT_TX(prod); txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; memcpy(txbd, tx_push1, sizeof(*txbd)); prod = NEXT_TX(prod); - push->doorbell = + tx_push->doorbell = cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); txr->tx_prod = prod; netdev_tx_sent_queue(txq, skb->len); - __iowrite64_copy(txr->tx_doorbell, push, - (length + sizeof(*push) + 8) / 8); + push_len = (length + sizeof(*tx_push) + 7) / 8; + if (push_len > 16) { + __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); + __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1, + push_len - 16); + } else { + __iowrite64_copy(txr->tx_doorbell, tx_push_buf, + push_len); + } tx_buf->is_push = 1; - goto tx_done; } @@ -1228,13 +1240,17 @@ static int bnxt_async_event_process(struct bnxt *bp, switch (event_id) { case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: + set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); - break; + goto async_event_process_exit; } + schedule_work(&bp->sp_task); +async_event_process_exit: return 0; } @@ -1753,7 +1769,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + bp->tx_push_thresh); - if (push_size > 128) { + if (push_size > 256) { push_size = 0; bp->tx_push_thresh = 0; } @@ -1772,7 +1788,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) return rc; if (bp->tx_push_size) { - struct tx_bd *txbd; dma_addr_t mapping; /* One pre-allocated DMA buffer to backup @@ -1786,13 +1801,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) if (!txr->tx_push) return -ENOMEM; - txbd = &txr->tx_push->txbd1; - mapping = txr->tx_push_mapping + sizeof(struct tx_push_bd); - txbd->tx_bd_haddr = cpu_to_le64(mapping); + txr->data_mapping = cpu_to_le64(mapping); - memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); + memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } ring->queue_id = bp->q_info[j].queue_id; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -2349,6 +2362,14 @@ static void bnxt_free_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; + if (bp->hw_rx_port_stats) { + dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, + bp->hw_rx_port_stats, + bp->hw_rx_port_stats_map); + bp->hw_rx_port_stats = NULL; + bp->flags &= ~BNXT_FLAG_PORT_STATS; + } + if (!bp->bnapi) return; @@ -2385,6 +2406,24 @@ static int bnxt_alloc_stats(struct bnxt *bp) cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } + + if (BNXT_PF(bp)) { + bp->hw_port_stats_size = sizeof(struct rx_port_stats) + + sizeof(struct tx_port_stats) + 1024; + + bp->hw_rx_port_stats = + dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, + &bp->hw_rx_port_stats_map, + GFP_KERNEL); + if (!bp->hw_rx_port_stats) + return -ENOMEM; + + bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + + 512; + bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + + sizeof(struct rx_port_stats) + 512; + bp->flags |= BNXT_FLAG_PORT_STATS; + } return 0; } @@ -2588,28 +2627,27 @@ alloc_mem_err: void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, u16 cmpl_ring, u16 target_id) { - struct hwrm_cmd_req_hdr 
*req = request; + struct input *req = request; - req->cmpl_ring_req_type = - cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT)); - req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT); + req->req_type = cpu_to_le16(req_type); + req->cmpl_ring = cpu_to_le16(cmpl_ring); + req->target_id = cpu_to_le16(target_id); req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, + int timeout, bool silent) { int i, intr_process, rc; - struct hwrm_cmd_req_hdr *req = msg; + struct input *req = msg; u32 *data = msg; __le32 *resp_len, *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++); + req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); memset(resp, 0, PAGE_SIZE); - cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) & - HWRM_CMPL_RING_MASK) >> - HWRM_CMPL_RING_SFT; + cp_ring_id = le16_to_cpu(req->cmpl_ring); intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; /* Write request msg to hwrm channel */ @@ -2620,12 +2658,14 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* currently supports only one outstanding message */ if (intr_process) - bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & - HWRM_SEQ_ID_MASK; + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); /* Ring channel doorbell */ writel(1, bp->bar0 + 0x100); + if (!timeout) + timeout = DFLT_HWRM_CMD_TIMEOUT; + i = 0; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ @@ -2636,7 +2676,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - req->cmpl_ring_req_type); + le16_to_cpu(req->req_type)); return -1; } } else { @@ -2652,8 +2692,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (i >= timeout) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - timeout, req->cmpl_ring_req_type, - req->target_id_seq_id, *resp_len); + timeout, le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), *resp_len); return -1; } @@ -2667,20 +2707,23 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (i >= timeout) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - timeout, req->cmpl_ring_req_type, - req->target_id_seq_id, len, *valid); + timeout, le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len, *valid); return -1; } } rc = le16_to_cpu(resp->error_code); - if (rc) { + if (rc && !silent) netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", le16_to_cpu(resp->req_type), le16_to_cpu(resp->seq_id), rc); - return rc; - } - return 0; + return rc; +} + +int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +{ + return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); } int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) @@ -2693,6 +2736,17 @@ int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) return rc; } +int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, + int timeout) +{ + int rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_func_drv_rgtr(struct 
bnxt *bp) { struct hwrm_func_drv_rgtr_input req = {0}; @@ -3509,47 +3563,82 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, + u32 buf_tmrs, u16 flags, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + req->flags = cpu_to_le16(flags); + req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); + req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); + req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); + /* Minimum time between 2 interrupts set to buf_tmr x 2 */ + req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); + req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); + req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4); +} + int bnxt_hwrm_set_coal(struct bnxt *bp) { int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, + req_tx = {0}, *req; u16 max_buf, max_buf_irq; u16 buf_tmr, buf_tmr_irq; u32 flags; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req_rx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req_tx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - /* Each rx completion (2 records) should be DMAed immediately */ - max_buf = min_t(u16, bp->coal_bufs / 4, 2); + /* Each rx completion (2 records) should be DMAed immediately. + * DMA 1/4 of the completion buffers at a time. + */ + max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); /* max_buf must not be zero */ max_buf = clamp_t(u16, max_buf, 1, 63); - max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); - buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); - buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); + max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); + /* buf timer set to 1/4 of interrupt timer */ + buf_tmr = max_t(u16, buf_tmr / 4, 1); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); + buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; /* RING_IDLE generates more IRQs for lower latency. Enable it only * if coal_ticks is less than 25 us. 
*/ - if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25) + if (bp->rx_coal_ticks < 25) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; - req.flags = cpu_to_le16(flags); - req.num_cmpl_dma_aggr = cpu_to_le16(max_buf); - req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq); - req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr); - req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq); - req.int_lat_tmr_min = cpu_to_le16(buf_tmr); - req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks); - req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs); + bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, + buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); + + /* max_buf must not be zero */ + max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); + max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); + /* buf timer set to 1/4 of interrupt timer */ + buf_tmr = max_t(u16, buf_tmr / 4, 1); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); + buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); + + flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, + buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { - req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + struct bnxt_napi *bnapi = bp->bnapi[i]; - rc = _hwrm_send_message(bp, &req, sizeof(req), + req = &req_rx; + if (!bnapi->rx_ring) + req = &req_tx; + req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + + rc = _hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); if (rc) break; @@ -3758,15 +3847,36 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_upd); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", + snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); + bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); + if (!bp->hwrm_cmd_timeout) + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; } +static int bnxt_hwrm_port_qstats(struct bnxt *bp) +{ + int rc; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_port_qstats_input req = {0}; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); + req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return rc; +} + static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@ -4401,6 +4511,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; + link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; link_info->duplex_setting = resp->duplex; if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -4411,6 +4522,8 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); 
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); + link_info->lp_auto_link_speeds = + le16_to_cpu(resp->link_partner_adv_speeds); link_info->preemphasis = le32_to_cpu(resp->preemphasis); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; @@ -4546,20 +4659,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp) if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && link_info->force_pause_setting != link_info->req_flow_ctrl) update_pause = true; - if (link_info->req_duplex != link_info->duplex_setting) - update_link = true; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; if (link_info->req_link_speed != link_info->force_link_speed) update_link = true; + if (link_info->req_duplex != link_info->duplex_setting) + update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; if (link_info->advertising != link_info->auto_link_speeds) update_link = true; - if (link_info->req_link_speed != link_info->auto_link_speed) - update_link = true; } if (update_link) @@ -4636,7 +4747,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (link_re_init) { rc = bnxt_update_phy_setting(bp); if (rc) - goto open_err; + netdev_warn(bp->dev, "failed to update phy settings\n"); } if (irq_re_init) { @@ -4654,6 +4765,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Enable TX queues */ bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); + bnxt_update_link(bp, true); return 0; @@ -4823,6 +4935,22 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct rx_port_stats *rx = bp->hw_rx_port_stats; + struct tx_port_stats *tx = bp->hw_tx_port_stats; + + stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); + stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); + stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + + le64_to_cpu(rx->rx_ovrsz_frames) + + le64_to_cpu(rx->rx_runt_frames); + stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + + le64_to_cpu(rx->rx_jbr_frames); + stats->collisions = le64_to_cpu(tx->tx_total_collisions); + stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); + stats->tx_errors = le64_to_cpu(tx->tx_err); + } + return stats; } @@ -5163,6 +5291,10 @@ static void bnxt_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { + set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -5214,6 +5346,9 @@ static void bnxt_sp_task(struct work_struct *work) rtnl_unlock(); } + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_port_qstats(bp); + smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } @@ -5277,6 +5412,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) goto init_err_release; } + pci_enable_pcie_error_reporting(pdev); + INIT_WORK(&bp->sp_task, bnxt_sp_task); spin_lock_init(&bp->ntp_fltr_lock); @@ -5284,10 +5421,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; - bp->coal_ticks = 
BNXT_USEC_TO_COAL_TIMER(4); - bp->coal_bufs = 20; - bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1); - bp->coal_bufs_irq = 2; + /* tick values in micro seconds */ + bp->rx_coal_ticks = 12; + bp->rx_coal_bufs = 30; + bp->rx_coal_ticks_irq = 1; + bp->rx_coal_bufs_irq = 2; + + bp->tx_coal_ticks = 25; + bp->tx_coal_bufs = 30; + bp->tx_coal_ticks_irq = 2; + bp->tx_coal_bufs_irq = 2; init_timer(&bp->timer); bp->timer.data = (unsigned long)bp; @@ -5370,9 +5513,16 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u8 tc) +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) { struct bnxt *bp = netdev_priv(dev); + u8 tc; + + if (ntc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + tc = ntc->tc; if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", @@ -5545,6 +5695,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } } + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!"); } #else @@ -5641,6 +5793,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); + pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); cancel_work_sync(&bp->sp_task); bp->sp_event = 0; @@ -5660,7 +5813,6 @@ static int bnxt_probe_phy(struct bnxt *bp) { int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - char phy_ver[PHY_VER_STR_LEN]; rc = bnxt_update_link(bp, false); if (rc) { @@ -5670,27 +5822,16 @@ static int bnxt_probe_phy(struct bnxt *bp) } /*initialize the ethool setting copy with NVM settings */ - if (BNXT_AUTO_MODE(link_info->auto_mode)) - link_info->autoneg |= BNXT_AUTONEG_SPEED; - - if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { - if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH) - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + link_info->autoneg = BNXT_AUTONEG_SPEED | + BNXT_AUTONEG_FLOW_CTRL; + link_info->advertising = link_info->auto_link_speeds; link_info->req_flow_ctrl = link_info->auto_pause_setting; - } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { + } else { + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_duplex = link_info->duplex_setting; link_info->req_flow_ctrl = link_info->force_pause_setting; } - link_info->req_duplex = link_info->duplex_setting; - if (link_info->autoneg & BNXT_AUTONEG_SPEED) - link_info->req_link_speed = link_info->auto_link_speed; - else - link_info->req_link_speed = link_info->force_link_speed; - link_info->advertising = link_info->auto_link_speeds; - snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", - link_info->phy_ver[0], - link_info->phy_ver[1], - link_info->phy_ver[2]); - strcat(bp->fw_ver_str, phy_ver); return rc; } @@ -5892,11 +6033,117 @@ init_err_free: return rc; } +/** + * bnxt_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + bnxt_close(netdev); + + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnxt_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + int err = 0; + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + + netdev_info(bp->dev, "PCI Slot Reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + } else { + pci_set_master(pdev); + + if (netif_running(netdev)) + err = bnxt_open(netdev); + + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + } + + if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) + dev_close(netdev); + + rtnl_unlock(); + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * bnxt_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ +static void bnxt_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + rtnl_lock(); + + netif_device_attach(netdev); + + rtnl_unlock(); +} + +static const struct pci_error_handlers bnxt_err_handler = { + .error_detected = bnxt_io_error_detected, + .slot_reset = bnxt_io_slot_reset, + .resume = bnxt_io_resume +}; + static struct pci_driver bnxt_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnxt_pci_tbl, .probe = bnxt_init_one, .remove = bnxt_remove_one, + .err_handler = &bnxt_err_handler, #if defined(CONFIG_BNXT_SRIOV) .sriov_configure = bnxt_sriov_configure, #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 8af3ca8efcef..ec04c47172b7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext { #define BNXT_NUM_TESTS(bp) 0 -#define BNXT_DEFAULT_RX_RING_SIZE 1023 -#define BNXT_DEFAULT_TX_RING_SIZE 512 +#define BNXT_DEFAULT_RX_RING_SIZE 511 +#define BNXT_DEFAULT_TX_RING_SIZE 511 #define MAX_TPA 64 @@ -477,12 +477,15 @@ struct rx_tpa_end_cmp_ext { #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define HWRM_CMD_TIMEOUT 500 +#define DFLT_HWRM_CMD_TIMEOUT 500 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) #define HWRM_RESP_ERR_CODE_MASK 0xffff +#define HWRM_RESP_LEN_OFFSET 4 #define HWRM_RESP_LEN_MASK 0xffff0000 #define HWRM_RESP_LEN_SFT 16 #define HWRM_RESP_VALID_MASK 0xff000000 +#define HWRM_SEQ_ID_INVALID -1 #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) @@ -523,10 +526,16 @@ struct bnxt_ring_struct { struct tx_push_bd { __le32 doorbell; - struct tx_bd txbd1; + __le32 tx_bd_len_flags_type; + u32 tx_bd_opaque; struct tx_bd_ext txbd2; }; +struct tx_push_buffer { + struct tx_push_bd push_bd; + u32 data[25]; +}; + struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; @@ -538,8 +547,9 @@ struct bnxt_tx_ring_info { dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; - struct tx_push_bd *tx_push; + struct tx_push_buffer *tx_push; dma_addr_t tx_push_mapping; + __le64 data_mapping; #define BNXT_DEV_STATE_CLOSING 0x1 u32 dev_state; @@ -637,19 +647,6 @@ struct bnxt_irq { #define INVALID_STATS_CTX_ID -1 -struct hwrm_cmd_req_hdr { -#define HWRM_CMPL_RING_MASK 0xffff0000 -#define HWRM_CMPL_RING_SFT 16 - __le32 cmpl_ring_req_type; -#define HWRM_SEQ_ID_MASK 0xffff -#define HWRM_SEQ_ID_INVALID -1 -#define HWRM_RESP_LEN_OFFSET 4 -#define HWRM_TARGET_FID_MASK 0xffff0000 -#define HWRM_TARGET_FID_SFT 16 - __le32 target_id_seq_id; - __le64 resp_addr; -}; - struct bnxt_ring_grp_info { u16 fw_stats_ctx; u16 fw_grp_id; @@ -760,10 +757,6 @@ struct bnxt_ntuple_filter { #define BNXT_FLTR_UPDATE 1 }; -#define BNXT_ALL_COPPER_ETHTOOL_SPEED \ - (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \ - ADVERTISED_10000baseT_Full) - struct bnxt_link_info { u8 media_type; u8 transceiver; @@ -783,6 +776,7 @@ struct bnxt_link_info { #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX #define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \ PORT_PHY_QCFG_RESP_PAUSE_TX) + u8 lp_pause; u8 auto_pause_setting; u8 force_pause_setting; u8 duplex_setting; @@ -817,6 +811,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define 
BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 lp_auto_link_speeds; u16 auto_link_speed; u16 force_link_speed; u32 preemphasis; @@ -878,6 +873,7 @@ struct bnxt { #define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 + #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -930,7 +926,7 @@ struct bnxt { struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; unsigned int current_interval; -#define BNXT_TIMER_INTERVAL (HZ / 2) +#define BNXT_TIMER_INTERVAL HZ struct timer_list timer; @@ -950,6 +946,14 @@ struct bnxt { void *hwrm_dbg_resp_addr; dma_addr_t hwrm_dbg_resp_dma_addr; #define HWRM_DBG_REG_BUF_SIZE 128 + + struct rx_port_stats *hw_rx_port_stats; + struct tx_port_stats *hw_tx_port_stats; + dma_addr_t hw_rx_port_stats_map; + dma_addr_t hw_tx_port_stats_map; + int hw_port_stats_size; + + int hwrm_cmd_timeout; struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ struct hwrm_ver_get_output ver_resp; #define FW_VER_STR_LEN 32 @@ -961,13 +965,17 @@ struct bnxt { __le16 vxlan_fw_dst_port_id; u8 nge_port_cnt; __le16 nge_fw_dst_port_id; - u16 coal_ticks; - u16 coal_ticks_irq; - u16 coal_bufs; - u16 coal_bufs_irq; + + u16 rx_coal_ticks; + u16 rx_coal_ticks_irq; + u16 rx_coal_bufs; + u16 rx_coal_bufs_irq; + u16 tx_coal_ticks; + u16 tx_coal_ticks_irq; + u16 tx_coal_bufs; + u16 tx_coal_bufs_irq; #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) -#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25) struct work_struct sp_task; unsigned long sp_event; @@ -979,6 +987,8 @@ struct bnxt { #define BNXT_VXLAN_DEL_PORT_SP_EVENT 5 #define BNXT_RESET_TASK_SP_EVENT 6 #define BNXT_RST_RING_SP_EVENT 7 +#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 +#define BNXT_PERIODIC_STATS_SP_EVENT 9 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV @@ -1092,6 +1102,7 @@ void bnxt_set_ring_params(struct bnxt *); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); +int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 922b898e7a32..9ada1662b651 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -7,6 +7,8 @@ * the Free Software Foundation. 
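With the rx_coal_*/tx_coal_* fields above now kept directly in microseconds, only the usec-to-timer-unit half of the old macro pair survives; BNXT_USEC_TO_COAL_TIMER() implies 12.5 hardware timer units per microsecond, truncated to an integer. A small standalone check of that conversion:

#include <stdio.h>

#define BNXT_USEC_TO_COAL_TIMER(x)	((x) * 25 / 2)

int main(void)
{
	unsigned int usecs[] = { 1, 4, 12, 25 };
	unsigned int i;

	for (i = 0; i < sizeof(usecs) / sizeof(usecs[0]); i++)
		printf("%3u usec -> %3u timer units\n",
		       usecs[i], BNXT_USEC_TO_COAL_TIMER(usecs[i]));
	return 0;
}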
*/ +#include <linux/ctype.h> +#include <linux/stringify.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/pci.h> @@ -20,6 +22,8 @@ #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) +static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen); + static u32 bnxt_get_msglevel(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -41,12 +45,16 @@ static int bnxt_get_coalesce(struct net_device *dev, memset(coal, 0, sizeof(*coal)); - coal->rx_coalesce_usecs = - max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1); - coal->rx_max_coalesced_frames = bp->coal_bufs / 2; - coal->rx_coalesce_usecs_irq = - max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1); - coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2; + coal->rx_coalesce_usecs = bp->rx_coal_ticks; + /* 2 completion records per rx packet */ + coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2; + coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2; + + coal->tx_coalesce_usecs = bp->tx_coal_ticks; + coal->tx_max_coalesced_frames = bp->tx_coal_bufs; + coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; return 0; } @@ -57,11 +65,16 @@ static int bnxt_set_coalesce(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int rc = 0; - bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs); - bp->coal_bufs = coal->rx_max_coalesced_frames * 2; - bp->coal_ticks_irq = - BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq); - bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + bp->rx_coal_ticks = coal->rx_coalesce_usecs; + /* 2 completion records per rx packet */ + bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2; + bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq; + bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + + bp->tx_coal_ticks = coal->tx_coalesce_usecs; + bp->tx_coal_bufs = coal->tx_max_coalesced_frames; + bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; + bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; if (netif_running(dev)) rc = bnxt_hwrm_set_coal(bp); @@ -71,13 +84,99 @@ static int bnxt_set_coalesce(struct net_device *dev, #define BNXT_NUM_STATS 21 +#define BNXT_RX_STATS_OFFSET(counter) \ + (offsetof(struct rx_port_stats, counter) / 8) + +#define BNXT_RX_STATS_ENTRY(counter) \ + { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } + +#define BNXT_TX_STATS_OFFSET(counter) \ + ((offsetof(struct tx_port_stats, counter) + \ + sizeof(struct rx_port_stats) + 512) / 8) + +#define BNXT_TX_STATS_ENTRY(counter) \ + { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_arr[] = { + BNXT_RX_STATS_ENTRY(rx_64b_frames), + BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), + BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), + BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), + BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), + BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames), + BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), + BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), + BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), + BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), + BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), + BNXT_RX_STATS_ENTRY(rx_total_frames), + BNXT_RX_STATS_ENTRY(rx_ucast_frames), + BNXT_RX_STATS_ENTRY(rx_mcast_frames), + BNXT_RX_STATS_ENTRY(rx_bcast_frames), + 
BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), + BNXT_RX_STATS_ENTRY(rx_ctrl_frames), + BNXT_RX_STATS_ENTRY(rx_pause_frames), + BNXT_RX_STATS_ENTRY(rx_pfc_frames), + BNXT_RX_STATS_ENTRY(rx_align_err_frames), + BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), + BNXT_RX_STATS_ENTRY(rx_jbr_frames), + BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), + BNXT_RX_STATS_ENTRY(rx_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_good_frames), + BNXT_RX_STATS_ENTRY(rx_undrsz_frames), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), + BNXT_RX_STATS_ENTRY(rx_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_frames), + + BNXT_TX_STATS_ENTRY(tx_64b_frames), + BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), + BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), + BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), + BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), + BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames), + BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), + BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames), + BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), + BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), + BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), + BNXT_TX_STATS_ENTRY(tx_good_frames), + BNXT_TX_STATS_ENTRY(tx_total_frames), + BNXT_TX_STATS_ENTRY(tx_ucast_frames), + BNXT_TX_STATS_ENTRY(tx_mcast_frames), + BNXT_TX_STATS_ENTRY(tx_bcast_frames), + BNXT_TX_STATS_ENTRY(tx_pause_frames), + BNXT_TX_STATS_ENTRY(tx_pfc_frames), + BNXT_TX_STATS_ENTRY(tx_jabber_frames), + BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), + BNXT_TX_STATS_ENTRY(tx_err), + BNXT_TX_STATS_ENTRY(tx_fifo_underruns), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), + BNXT_TX_STATS_ENTRY(tx_total_collisions), + BNXT_TX_STATS_ENTRY(tx_bytes), +}; + +#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: - return BNXT_NUM_STATS * bp->cp_nr_rings; + case ETH_SS_STATS: { + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; + } default: return -EOPNOTSUPP; } @@ -106,6 +205,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; + + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) { + buf[j] = le64_to_cpu(*(port_stats + + bnxt_port_stats_arr[i].offset)); + } + } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -160,6 +267,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) sprintf(buf, "[%d]: rx_l4_csum_errors", i); buf += ETH_GSTRING_LEN; } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { + strcpy(buf, bnxt_port_stats_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } break; default: netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", @@ -460,10 +573,20 @@ static void bnxt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnxt *bp = netdev_priv(dev); + char *pkglog; + char *pkgver = NULL; + pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); + if (pkglog) + pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 
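The BNXT_RX_STATS_ENTRY()/BNXT_TX_STATS_ENTRY() table above pairs each hardware counter's offset, expressed in 64-bit words, with its stringified name, so bnxt_get_strings() and bnxt_get_ethtool_stats() can both walk a single array. A standalone illustration of the same offsetof()/stringify pattern; the struct and counter names here are invented for the demo:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define STR(x) #x

struct demo_port_stats {
	uint64_t rx_total_frames;
	uint64_t rx_bcast_frames;
	uint64_t tx_total_frames;
};

#define DEMO_STATS_ENTRY(counter) \
	{ offsetof(struct demo_port_stats, counter) / 8, STR(counter) }

static const struct {
	long offset;			/* in 64-bit words, as in the driver */
	const char *string;
} demo_stats_arr[] = {
	DEMO_STATS_ENTRY(rx_total_frames),
	DEMO_STATS_ENTRY(rx_bcast_frames),
	DEMO_STATS_ENTRY(tx_total_frames),
};

int main(void)
{
	struct demo_port_stats hw = { 100, 7, 42 };
	const uint64_t *raw = (const uint64_t *)&hw;
	size_t i;

	for (i = 0; i < sizeof(demo_stats_arr) / sizeof(demo_stats_arr[0]); i++)
		printf("%-20s %llu\n", demo_stats_arr[i].string,
		       (unsigned long long)raw[demo_stats_arr[i].offset]);
	return 0;
}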
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) + snprintf(info->fw_version, sizeof(info->fw_version) - 1, + "%s pkg %s", bp->fw_ver_str, pkgver); + else + strlcpy(info->fw_version, bp->fw_ver_str, + sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; info->testinfo_len = BNXT_NUM_TESTS(bp); @@ -471,30 +594,32 @@ static void bnxt_get_drvinfo(struct net_device *dev, info->eedump_len = 0; /* TODO CHIMP FW: reg dump details */ info->regdump_len = 0; + kfree(pkglog); } -static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) { - u16 fw_speeds = link_info->support_speeds; u32 speed_mask = 0; + /* TODO: support 25GB, 40GB, 50GB with different cable type */ + /* set the advertised speeds */ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) - speed_mask |= SUPPORTED_100baseT_Full; + speed_mask |= ADVERTISED_100baseT_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) - speed_mask |= SUPPORTED_1000baseT_Full; + speed_mask |= ADVERTISED_1000baseT_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) - speed_mask |= SUPPORTED_2500baseX_Full; + speed_mask |= ADVERTISED_2500baseX_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) - speed_mask |= SUPPORTED_10000baseT_Full; - /* TODO: support 25GB, 50GB with different cable type */ - if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) - speed_mask |= SUPPORTED_20000baseMLD2_Full | - SUPPORTED_20000baseKR2_Full; + speed_mask |= ADVERTISED_10000baseT_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= SUPPORTED_40000baseKR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseLR4_Full; + speed_mask |= ADVERTISED_40000baseCR4_Full; + + if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) + speed_mask |= ADVERTISED_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_TX) + speed_mask |= ADVERTISED_Asym_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_RX) + speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; return speed_mask; } @@ -502,28 +627,32 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) { u16 fw_speeds = link_info->auto_link_speeds; - u32 speed_mask = 0; + u8 fw_pause = 0; - /* TODO: support 25GB, 40GB, 50GB with different cable type */ - /* set the advertised speeds */ - if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) - speed_mask |= ADVERTISED_100baseT_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) - speed_mask |= ADVERTISED_1000baseT_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) - speed_mask |= ADVERTISED_2500baseX_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) - speed_mask |= ADVERTISED_10000baseT_Full; - /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/ - if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) - speed_mask |= ADVERTISED_20000baseMLD2_Full | - ADVERTISED_20000baseKR2_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= ADVERTISED_40000baseKR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseLR4_Full; - return speed_mask; + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->auto_pause_setting; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + +static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = 
link_info->lp_auto_link_speeds; + u8 fw_pause = 0; + + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->lp_pause; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + +static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_speeds; + u32 supported; + + supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; } u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) @@ -561,38 +690,18 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (link_info->auto_link_speeds) cmd->supported |= SUPPORTED_Autoneg; - if (BNXT_AUTO_MODE(link_info->auto_mode)) { + if (link_info->autoneg) { cmd->advertising = bnxt_fw_to_ethtool_advertised_spds(link_info); cmd->advertising |= ADVERTISED_Autoneg; cmd->autoneg = AUTONEG_ENABLE; + if (link_info->phy_link_status == BNXT_LINK_LINK) + cmd->lp_advertising = + bnxt_fw_to_ethtool_lp_adv(link_info); } else { cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; } - if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { - if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == - BNXT_LINK_PAUSE_BOTH) { - cmd->advertising |= ADVERTISED_Pause; - cmd->supported |= SUPPORTED_Pause; - } else { - cmd->advertising |= ADVERTISED_Asym_Pause; - cmd->supported |= SUPPORTED_Asym_Pause; - if (link_info->auto_pause_setting & - BNXT_LINK_PAUSE_RX) - cmd->advertising |= ADVERTISED_Pause; - } - } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { - if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) == - BNXT_LINK_PAUSE_BOTH) { - cmd->supported |= SUPPORTED_Pause; - } else { - cmd->supported |= SUPPORTED_Asym_Pause; - if (link_info->force_pause_setting & - BNXT_LINK_PAUSE_RX) - cmd->supported |= SUPPORTED_Pause; - } - } cmd->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { @@ -670,6 +779,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) if (advertising & ADVERTISED_10000baseT_Full) fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + if (advertising & ADVERTISED_40000baseCR4_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; + return fw_speed_mask; } @@ -685,16 +797,10 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return rc; if (cmd->autoneg == AUTONEG_ENABLE) { - if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - netdev_err(dev, "Media type doesn't support autoneg\n"); - rc = -EINVAL; - goto set_setting_exit; - } - if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED | - ADVERTISED_Autoneg | - ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Asym_Pause)) { + u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + + if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | + ADVERTISED_TP | ADVERTISED_FIBRE)) { netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", cmd->advertising); rc = -EINVAL; @@ -729,7 +835,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; - link_info->autoneg &= ~BNXT_AUTONEG_SPEED; + link_info->autoneg = 0; link_info->advertising = 0; } @@ -748,10 +854,11 @@ static void bnxt_get_pauseparam(struct net_device *dev, if (BNXT_VF(bp)) return; - epause->autoneg = !!(link_info->auto_pause_setting & - BNXT_LINK_PAUSE_BOTH); - epause->rx_pause = ((link_info->pause & 
BNXT_LINK_PAUSE_RX) != 0); - epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); + epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); + epause->rx_pause = + ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); + epause->tx_pause = + ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0); } static int bnxt_set_pauseparam(struct net_device *dev, @@ -765,6 +872,9 @@ static int bnxt_set_pauseparam(struct net_device *dev, return rc; if (epause->autoneg) { + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) + return -EINVAL; + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; } else { @@ -1122,6 +1232,85 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, return rc; } +static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_find_dir_entry_input req = {0}; + struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); + req.enables = 0; + req.dir_idx = 0; + req.dir_type = cpu_to_le16(type); + req.dir_ordinal = cpu_to_le16(ordinal); + req.dir_ext = cpu_to_le16(ext); + req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) { + if (index) + *index = le16_to_cpu(output->dir_idx); + if (item_length) + *item_length = le32_to_cpu(output->dir_item_length); + if (data_length) + *data_length = le32_to_cpu(output->dir_data_length); + } + return rc; +} + +static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) +{ + char *retval = NULL; + char *p; + char *value; + int field = 0; + + if (datalen < 1) + return NULL; + /* null-terminate the log data (removing last '\n'): */ + data[datalen - 1] = 0; + for (p = data; *p != 0; p++) { + field = 0; + retval = NULL; + while (*p != 0 && *p != '\n') { + value = p; + while (*p != 0 && *p != '\t' && *p != '\n') + p++; + if (field == desired_field) + retval = value; + if (*p != '\t') + break; + *p = 0; + field++; + p++; + } + if (*p == 0) + break; + *p = 0; + } + return retval; +} + +static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) +{ + u16 index = 0; + u32 datalen; + + if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, NULL, &datalen) != 0) + return NULL; + + memset(buf, 0, buflen); + if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) + return NULL; + + return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, + datalen); +} + static int bnxt_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 3cf3e1b70b64..43ef392c8588 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -50,10 +50,24 @@ enum bnxt_nvm_directory_type { #define BNX_DIR_ORDINAL_FIRST 0 +#define BNX_DIR_EXT_NONE 0 #define BNX_DIR_EXT_INACTIVE (1 << 0) #define BNX_DIR_EXT_UPDATE (1 << 1) +#define BNX_DIR_ATTR_NONE 0 #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) +#define BNX_PKG_LOG_MAX_LENGTH 4096 + +enum bnxnvm_pkglog_field_index { + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, + 
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, + BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2, + BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3, + BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6 +}; + #endif /* Don't add anything after this line */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c1cc83d7e38c..0c5f510492f1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -522,6 +522,46 @@ err_out1: return rc; } +static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, + struct bnxt_vf_info *vf, + u16 event_id) +{ + int rc = 0; + struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_async_event_cmpl *async_cmpl; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); + if (vf) + req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + else + /* broadcast this async event to all VFs */ + req.encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + async_cmpl->type = + cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); + async_cmpl->event_id = cpu_to_le16(event_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", + rc); + goto fwd_async_event_cmpl_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", + resp->error_code); + rc = -1; + } + +fwd_async_event_cmpl_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + void bnxt_sriov_disable(struct bnxt *bp) { u16 num_vfs = pci_num_vf(bp->pdev); @@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp) return; if (pci_vfs_assigned(bp->pdev)) { + bnxt_hwrm_fwd_async_event_cmpl( + bp, NULL, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", num_vfs); } else { @@ -758,8 +801,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) { int rc = 0; - struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr; - u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff; + struct input *encap_req = vf->hwrm_cmd_req_addr; + u32 req_type = le16_to_cpu(encap_req->req_type); switch (req_type) { case HWRM_CFA_L2_FILTER_ALLOC: @@ -809,13 +852,19 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) goto update_vf_mac_exit; - if (!is_valid_ether_addr(resp->perm_mac_address)) - goto update_vf_mac_exit; - + /* Store MAC address from the firmware. There are 2 cases: + * 1. MAC address is valid. It is assigned from the PF and we + * need to override the current VF MAC address with it. + * 2. MAC address is zero. The VF will use a random MAC address by + * default but the stored zero MAC will allow the VF user to change + * the random MAC address using ndo_set_mac_address() if he wants. 
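bnxt_get_pkgver() above reads the BNX_DIR_TYPE_PKG_LOG item out of NVRAM and hands it to bnxt_parse_pkglog(), which treats the blob as newline-separated records of tab-separated fields and returns the requested field of the last record. A standalone sketch of that tokenizer; the sample log contents are invented:

#include <stdio.h>
#include <string.h>

static char *parse_pkglog(int desired_field, char *data, size_t datalen)
{
	char *retval = NULL, *p, *value;
	int field;

	if (datalen < 1)
		return NULL;
	data[datalen - 1] = 0;			/* drop the trailing '\n' */
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;			/* terminate this field */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;				/* terminate the record */
	}
	return retval;
}

int main(void)
{
	char log[] = "2015-11-02\tinitial build\t1.0.0\n"
		     "2016-02-19\tupdate\t1.2.2\n";

	/* field index 2 plays the role of BNX_PKG_LOG_FIELD_IDX_PKG_VERSION */
	printf("pkg version: %s\n", parse_pkglog(2, log, strlen(log)));
	return 0;
}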
+ */ if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); - /* overwrite netdev dev_adr with admin VF MAC */ - memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + + /* overwrite netdev dev_addr with admin VF MAC */ + if (is_valid_ether_addr(bp->vf.mac_addr)) + memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b15a60d787c7..6746fd03cb3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), - tx_cb_ptr->skb->len, + dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); bcmgenet_free_cb(tx_cb_ptr); } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { @@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, } dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); + dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len); length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | DMA_TX_APPEND_CRC; @@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work) } /* Link UP/DOWN event */ - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && - (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { + if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { phy_mac_interrupt(priv->phydev, !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 04b0d16b210e..95bc470ae441 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) if (!list_empty(&rxf->ucast_pending_add_q)) { mac = list_first_entry(&rxf->ucast_pending_add_q, struct bna_mac, qe); - list_add_tail(&mac->qe, &rxf->ucast_active_q); + list_move_tail(&mac->qe, &rxf->ucast_active_q); bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); return 1; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 50c94104f19c..3ce6095ced3d 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -58,6 +58,9 @@ #define GEM_MTU_MIN_SIZE 68 +#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define MACB_WOL_ENABLED (0x1 << 1) + /* * Graceful stop timeouts in us. 
We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) @@ -2124,6 +2127,39 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } +static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } +} + +static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || + (wol->wolopts & ~WAKE_MAGIC)) + return -EOPNOTSUPP; + + if (wol->wolopts & WAKE_MAGIC) + bp->wol |= MACB_WOL_ENABLED; + else + bp->wol &= ~MACB_WOL_ENABLED; + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); + + return 0; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, @@ -2131,6 +2167,8 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_wol = macb_get_wol, + .set_wol = macb_set_wol, }; static const struct ethtool_ops gem_ethtool_ops = { @@ -2402,9 +2440,9 @@ static int macb_init(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(RMII); - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(MII); if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) @@ -2736,7 +2774,7 @@ static int at91ether_init(struct platform_device *pdev) } static const struct macb_config at91sam9260_config = { - .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, }; @@ -2749,21 +2787,22 @@ static const struct macb_config pc302gem_config = { }; static const struct macb_config sama5d2_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d3_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE + | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d4_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 4, .clk_init = macb_clk_init, .init = macb_init, @@ -2890,6 +2929,11 @@ static int macb_probe(struct platform_device *pdev) if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; + bp->wol = 0; + if (of_get_property(np, "magic-packet", NULL)) + bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; + device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -3006,9 +3050,15 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); netif_device_detach(netdev); - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); + if (bp->wol & 
MACB_WOL_ENABLED) { + macb_writel(bp, IER, MACB_BIT(WOL)); + macb_writel(bp, WOL, MACB_BIT(MAG)); + enable_irq_wake(bp->queues[0].irq); + } else { + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + } return 0; } @@ -3019,9 +3069,15 @@ static int __maybe_unused macb_resume(struct device *dev) struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); - clk_prepare_enable(bp->pclk); - clk_prepare_enable(bp->hclk); - clk_prepare_enable(bp->tx_clk); + if (bp->wol & MACB_WOL_ENABLED) { + macb_writel(bp, IDR, MACB_BIT(WOL)); + macb_writel(bp, WOL, 0); + disable_irq_wake(bp->queues[0].irq); + } else { + clk_prepare_enable(bp->pclk); + clk_prepare_enable(bp->hclk); + clk_prepare_enable(bp->tx_clk); + } netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 0d4ecfcd60b7..8a13824ef802 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -312,6 +312,8 @@ #define MACB_PFR_SIZE 1 #define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */ #define MACB_PTZ_SIZE 1 +#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ +#define MACB_WOL_SIZE 1 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -398,7 +400,7 @@ /* Capability mask bits */ #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_FIFO_MODE 0x10000000 @@ -842,6 +844,8 @@ struct macb { unsigned int rx_frm_len_mask; unsigned int jumbo_max_len; + + u32 wol; }; static inline bool macb_is_gem(struct macb *bp) diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 8fb84e69c30e..0ef232d3331e 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -35,7 +35,7 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT select PHYLIB - select MDIO_OCTEON + select MDIO_THUNDER ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. @@ -53,4 +53,15 @@ config LIQUIDIO To compile this driver as a module, choose M here: the module will be called liquidio. This is recommended. +config OCTEON_MGMT_ETHERNET + tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" + depends on CAVIUM_OCTEON_SOC + select PHYLIB + select MDIO_OCTEON + default y + help + Enable the ethernet driver for the management + port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, + CN54XX, CN52XX, and CN6XXX chips. 
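macb_set_wol() above accepts only WAKE_MAGIC, and only when the "magic-packet" device-tree property marked the controller as capable; anything else is rejected with -EOPNOTSUPP. A userspace sketch of that capability/enable bookkeeping, with the flag values redefined locally so it stands alone:

#include <stdio.h>

#define WOL_HAS_MAGIC_PACKET	(0x1 << 0)	/* hardware capability */
#define WOL_ENABLED		(0x1 << 1)	/* user enabled it */
#define WAKE_MAGIC		(1 << 5)	/* same value as linux/ethtool.h */

static int set_wol(unsigned int *wol, unsigned int wolopts)
{
	if (!(*wol & WOL_HAS_MAGIC_PACKET) || (wolopts & ~WAKE_MAGIC))
		return -1;			/* -EOPNOTSUPP in the driver */

	if (wolopts & WAKE_MAGIC)
		*wol |= WOL_ENABLED;
	else
		*wol &= ~WOL_ENABLED;
	return 0;
}

int main(void)
{
	unsigned int wol = WOL_HAS_MAGIC_PACKET;	/* capable, not yet enabled */

	printf("enable magic: %d, state 0x%x\n", set_wol(&wol, WAKE_MAGIC), wol);
	printf("disable:      %d, state 0x%x\n", set_wol(&wol, 0), wol);

	wol = 0;					/* controller without the capability */
	printf("no capability: %d\n", set_wol(&wol, WAKE_MAGIC));
	return 0;
}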
+ endif # NET_VENDOR_CAVIUM diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile index d22f886ac291..872da9f7c31a 100644 --- a/drivers/net/ethernet/cavium/Makefile +++ b/drivers/net/ethernet/cavium/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/ +obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 872765527081..34d269cd5579 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1683,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); /* droq creation and local register settings. */ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val == -1) + if (ret_val < 0) return ret_val; if (ret_val == 1) { @@ -2524,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct, octeon_swap_8B_data(&resp->timestamp, 1); - if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { struct skb_shared_hwtstamps ts; u64 ns = resp->timestamp; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 4dba86eaa045..174072b3740b 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct, create_droq_fail: octeon_delete_droq(oct, q_no); - return -1; + return -ENOMEM; } diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/cavium/octeon/Makefile index efa41c1d91c5..efa41c1d91c5 100644 --- a/drivers/net/ethernet/octeon/Makefile +++ b/drivers/net/ethernet/cavium/octeon/Makefile diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index c177c7cec13b..c177c7cec13b 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 688828865c48..83025bb4737c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -116,6 +116,15 @@ #define NIC_PF_INTR_ID_MBOX0 8 #define NIC_PF_INTR_ID_MBOX1 9 +/* Minimum FIFO level before all packets for the CQ are dropped + * + * This value ensures that once a packet has been "accepted" + * for reception it will not get dropped due to non-availability + * of CQ descriptor. An errata in HW mandates this value to be + * atleast 0x100. 
+ */ +#define NICPF_CQM_MIN_DROP_LEVEL 0x100 + /* Global timer for CQ timer thresh interrupts * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected @@ -248,10 +257,13 @@ struct nicvf_drv_stats { u64 rx_frames_jumbo; u64 rx_drops; + u64 rcv_buffer_alloc_failures; + /* Tx */ u64 tx_frames_ok; u64 tx_drops; u64 tx_tso; + u64 tx_timeout; u64 txq_stop; u64 txq_wake; }; @@ -260,45 +272,54 @@ struct nicvf { struct nicvf *pnicvf; struct net_device *netdev; struct pci_dev *pdev; + void __iomem *reg_base; + struct queue_set *qs; + struct nicvf_cq_poll *napi[8]; u8 vf_id; - u8 node; - u8 tns_mode:1; - u8 sqs_mode:1; - u8 loopback_supported:1; + u8 sqs_id; + bool sqs_mode; bool hw_tso; - u16 mtu; - struct queue_set *qs; + + /* Receive buffer alloc */ + u32 rb_page_offset; + u16 rb_pageref; + bool rb_alloc_fail; + bool rb_work_scheduled; + struct page *rb_page; + struct delayed_work rbdr_work; + struct tasklet_struct rbdr_task; + + /* Secondary Qset */ + u8 sqs_count; #define MAX_SQS_PER_VF_SINGLE_NODE 5 #define MAX_SQS_PER_VF 11 - u8 sqs_id; - u8 sqs_count; /* Secondary Qset count */ struct nicvf *snicvf[MAX_SQS_PER_VF]; + + /* Queue count */ u8 rx_queues; u8 tx_queues; u8 max_queues; - void __iomem *reg_base; + + u8 node; + u8 cpi_alg; + u16 mtu; bool link_up; u8 duplex; u32 speed; - struct page *rb_page; - u32 rb_page_offset; - bool rb_alloc_fail; - bool rb_work_scheduled; - struct delayed_work rbdr_work; - struct tasklet_struct rbdr_task; - struct tasklet_struct qs_err_task; - struct tasklet_struct cq_task; - struct nicvf_cq_poll *napi[8]; + bool tns_mode; + bool loopback_supported; struct nicvf_rss_info rss_info; - u8 cpi_alg; + struct tasklet_struct qs_err_task; + struct work_struct reset_task; + /* Interrupt coalescing settings */ u32 cq_coalesce_usecs; - u32 msg_enable; + + /* Stats */ struct nicvf_hw_stats hw_stats; struct nicvf_drv_stats drv_stats; struct bgx_stats bgx_stats; - struct work_struct reset_task; /* MSI-X */ bool msix_enabled; @@ -306,6 +327,7 @@ struct nicvf { struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; char irq_name[NIC_VF_MSIX_VECTORS][20]; bool irq_allocated[NIC_VF_MSIX_VECTORS]; + cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS]; /* VF <-> PF mailbox communication */ bool pf_acked; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4dded90076c8..95f17f8cadac 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) static void nic_init_hw(struct nicpf *nic) { int i; + u64 cqm_cfg; /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic) /* Enable VLAN ethertype matching and stripping */ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); + + /* Check if HW expected value is higher (could be in future chips) */ + cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); + if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) + nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); } /* Channel parse index configuration */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index dd536be20193..afb10e326b4f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -21,7 +21,7 @@ #define NIC_PF_TCP_TIMER (0x0060) #define NIC_PF_BP_CFG (0x0080) 
#define NIC_PF_RRM_CFG (0x0088) -#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CQM_CFG (0x00A0) #define NIC_PF_CNM_CF (0x00A8) #define NIC_PF_CNM_STATUS (0x00B0) #define NIC_PF_CQ_AVG_CFG (0x00C0) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index a12b2e38cf61..d2d8ef270142 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(rx_frames_1518), NICVF_DRV_STAT(rx_frames_jumbo), NICVF_DRV_STAT(rx_drops), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), NICVF_DRV_STAT(tx_frames_ok), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_drops), + NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), NICVF_DRV_STAT(txq_wake), }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c24cb2a86a42..bfee298fc02a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev, static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, - struct cmp_queue *cq, - struct cqe_rx_t *cqe_rx, int cqe_type) + struct cqe_rx_t *cqe_rx) { struct sk_buff *skb; struct nicvf *nic = netdev_priv(netdev); @@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, } /* Check for errors */ - err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); + err = nicvf_check_cqe_rx_errs(nic, cqe_rx); if (err && !cqe_rx->rb_cnt) return; @@ -682,8 +681,7 @@ loop: cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: - nicvf_rcv_pkt_handler(netdev, napi, cq, - cq_desc, CQE_TYPE_RX); + nicvf_rcv_pkt_handler(netdev, napi, cq_desc); work_done++; break; case CQE_TYPE_SEND: @@ -828,7 +826,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq) nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); /* Schedule NAPI */ - napi_schedule(&cq_poll->napi); + napi_schedule_irqoff(&cq_poll->napi); /* Clear interrupt */ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); @@ -899,6 +897,31 @@ static void nicvf_disable_msix(struct nicvf *nic) } } +static void nicvf_set_irq_affinity(struct nicvf *nic) +{ + int vec, cpu; + int irqnum; + + for (vec = 0; vec < nic->num_vec; vec++) { + if (!nic->irq_allocated[vec]) + continue; + + if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL)) + return; + /* CQ interrupts */ + if (vec < NICVF_INTR_ID_SQ) + /* Leave CPU0 for RBDR and other interrupts */ + cpu = nicvf_netdev_qidx(nic, vec) + 1; + else + cpu = 0; + + cpumask_set_cpu(cpumask_local_spread(cpu, nic->node), + nic->affinity_mask[vec]); + irqnum = nic->msix_entries[vec].vector; + irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]); + } +} + static int nicvf_register_interrupts(struct nicvf *nic) { int irq, ret = 0; @@ -944,8 +967,13 @@ static int nicvf_register_interrupts(struct nicvf *nic) ret = request_irq(nic->msix_entries[irq].vector, nicvf_qs_err_intr_handler, 0, nic->irq_name[irq], nic); - if (!ret) - nic->irq_allocated[irq] = true; + if (ret) + goto err; + + nic->irq_allocated[irq] = true; + + /* Set IRQ affinities */ + nicvf_set_irq_affinity(nic); err: if (ret) @@ -963,6 +991,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) if (!nic->irq_allocated[irq]) continue; + irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL); + 
free_cpumask_var(nic->affinity_mask[irq]); + if (irq < NICVF_INTR_ID_SQ) free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); else @@ -1125,7 +1156,6 @@ int nicvf_stop(struct net_device *netdev) /* Clear multiqset info */ nic->pnicvf = nic; - nic->sqs_count = 0; return 0; } @@ -1354,6 +1384,9 @@ void nicvf_update_stats(struct nicvf *nic) drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok; + drv_stats->rx_frames_ok = stats->rx_ucast_frames + + stats->rx_bcast_frames + + stats->rx_mcast_frames; drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun; drv_stats->tx_drops = stats->tx_drops; @@ -1394,6 +1427,7 @@ static void nicvf_tx_timeout(struct net_device *dev) netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name); + nic->drv_stats.tx_timeout++; schedule_work(&nic->reset_task); } @@ -1538,6 +1572,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nicvf_send_vf_struct(nic); + if (!pass1_silicon(nic->pdev)) + nic->hw_tso = true; + /* Check if this VF is in QS only mode */ if (nic->sqs_mode) return 0; @@ -1557,9 +1594,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; - if (!pass1_silicon(nic->pdev)) - nic->hw_tso = true; - netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d0d1b5490061..fa05e347262f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -18,6 +18,15 @@ #include "q_struct.h" #include "nicvf_queues.h" +static void nicvf_get_page(struct nicvf *nic) +{ + if (!nic->rb_pageref || !nic->rb_page) + return; + + atomic_add(nic->rb_pageref, &nic->rb_page->_count); + nic->rb_pageref = 0; +} + /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, u64 reg, int bit_pos, int bits, int val) @@ -78,32 +87,32 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - int order = get_order(buf_len); + int order = (PAGE_SIZE <= 4096) ? 
PAGE_ALLOC_COSTLY_ORDER : 0; /* Check if request can be accomodated in previous allocated page */ - if (nic->rb_page) { - if ((nic->rb_page_offset + buf_len + buf_len) > - (PAGE_SIZE << order)) { - nic->rb_page = NULL; - } else { - nic->rb_page_offset += buf_len; - get_page(nic->rb_page); - } + if (nic->rb_page && + ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) { + nic->rb_pageref++; + goto ret; } + nicvf_get_page(nic); + nic->rb_page = NULL; + /* Allocate a new page */ if (!nic->rb_page) { nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order); if (!nic->rb_page) { - netdev_err(nic->netdev, - "Failed to allocate new rcv buffer\n"); + nic->drv_stats.rcv_buffer_alloc_failures++; return -ENOMEM; } nic->rb_page_offset = 0; } +ret: *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); + nic->rb_page_offset += buf_len; return 0; } @@ -159,6 +168,9 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, desc = GET_RBDR_DESC(rbdr, idx); desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; } + + nicvf_get_page(nic); + return 0; } @@ -242,6 +254,8 @@ refill: new_rb++; } + nicvf_get_page(nic); + /* make sure all memory stores are done before ringing doorbell */ smp_wmb(); @@ -1329,16 +1343,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) } /* Check for errors in the receive cmp.queue entry */ -int nicvf_check_cqe_rx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { struct nicvf_hw_stats *stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; - if (!cqe_rx->err_level && !cqe_rx->err_opcode) { - drv_stats->rx_frames_ok++; + if (!cqe_rx->err_level && !cqe_rx->err_opcode) return 0; - } if (netif_msg_rx_err(nic)) netdev_err(nic->netdev, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index c5030a7f213a..6673e1133523 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, /* Stats */ void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); -int nicvf_check_cqe_rx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx); #endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9df26c2263bc..967951582e03 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx) #ifdef CONFIG_ACPI -static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) +static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, + u8 *dst) { u8 mac[ETH_ALEN]; int ret; @@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) goto out; if (!is_valid_ether_addr(mac)) { + dev_err(dev, "MAC address invalid: %pM\n", mac); ret = -EINVAL; goto out; } + dev_info(dev, "MAC address set to: %pM\n", mac); + memcpy(dst, mac, ETH_ALEN); out: return ret; @@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, u32 lvl, void 
*context, void **rv) { struct bgx *bgx = context; + struct device *dev = &bgx->pdev->dev; struct acpi_device *adev; if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; out: @@ -968,38 +973,63 @@ static int bgx_init_acpi_phy(struct bgx *bgx) static int bgx_init_of_phy(struct bgx *bgx) { - struct device_node *np; - struct device_node *np_child; + struct fwnode_handle *fwn; + struct device_node *node = NULL; u8 lmac = 0; - char bgx_sel[5]; - const char *mac; - - /* Get BGX node from DT */ - snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); - np = of_find_node_by_name(NULL, bgx_sel); - if (!np) - return -ENODEV; - - for_each_child_of_node(np, np_child) { - struct device_node *phy_np = of_parse_phandle(np_child, - "phy-handle", 0); - if (!phy_np) - continue; - bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); - mac = of_get_mac_address(np_child); + device_for_each_child_node(&bgx->pdev->dev, fwn) { + struct phy_device *pd; + struct device_node *phy_np; + const char *mac; + + /* Should always be an OF node. But if it is not, we + * cannot handle it, so exit the loop. + */ + node = to_of_node(fwn); + if (!node) + break; + + mac = of_get_mac_address(node); if (mac) ether_addr_copy(bgx->lmac[lmac].mac, mac); SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; + + phy_np = of_parse_phandle(node, "phy-handle", 0); + /* If there is no phy or defective firmware presents + * this cortina phy, for which there is no driver + * support, ignore it. + */ + if (phy_np && + !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { + /* Wait until the phy drivers are available */ + pd = of_phy_find_device(phy_np); + if (!pd) + goto defer; + bgx->lmac[lmac].phydev = pd; + } + lmac++; - if (lmac == MAX_LMAC_PER_BGX) { - of_node_put(np_child); + if (lmac == MAX_LMAC_PER_BGX) break; - } } + of_node_put(node); return 0; + +defer: + /* We are bailing out, try not to leak device reference counts + * for phy devices we may have already found. + */ + while (lmac) { + if (bgx->lmac[lmac].phydev) { + put_device(&bgx->lmac[lmac].phydev->mdio.dev); + bgx->lmac[lmac].phydev = NULL; + } + lmac--; + } + of_node_put(node); + return -EPROBE_DEFER; } #else @@ -1026,9 +1056,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct bgx *bgx = NULL; u8 lmac; - /* Load octeon mdio driver */ - octeon_mdiobus_force_mod_depencency(); - bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) return -ENOMEM; diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index ee04caa6c4d8..a89721fad633 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable) return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); } +static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val) +{ + char tok[len + 1]; + + memcpy(tok, s, len); + tok[len] = 0; + return kstrtouint(strim(tok), base, val); +} + +static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val) +{ + char tok[len + 1]; + + memcpy(tok, s, len); + tok[len] = 0; + return kstrtou16(strim(tok), base, val); +} + /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read @@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) return ret; } - ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); + ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk); if (ret) return ret; - ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); + ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk); if (ret) return ret; - ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); + ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk); if (ret) return ret; - ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); + ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc); if (ret) return ret; - ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); + ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing); if (ret) return ret; memcpy(p->sn, vpd.sn_data, SERNUM_LEN); @@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) } else { p->port_type[0] = hex_to_bin(vpd.port0_data[0]); p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); + ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16, + &p->xauicfg[0]); if (ret) return ret; - ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); + ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16, + &p->xauicfg[1]); if (ret) return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 646076e36bca..984a3cc26f86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -720,6 +720,11 @@ struct doorbell_stats { u32 db_full; }; +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -758,6 +763,7 @@ struct adapter { void *uld_handle[CXGB4_ULD_MAX]; struct list_head list_node; struct list_head rcu_node; + struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ void *iscsi_ppm; @@ -1228,6 +1234,24 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. 
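The vpdstrtouint()/vpdstrtou16() helpers above exist because the cxgb3 VPD fields are fixed-width and not NUL-terminated, so each one is copied into a terminated scratch buffer and trimmed before numeric conversion. A userspace analogue of the same pattern; the field contents are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

static int fieldtouint(const char *s, int len, int base, unsigned int *val)
{
	char tok[len + 1];
	char *p = tok, *end;

	memcpy(tok, s, len);
	tok[len] = 0;				/* the raw field has no terminator */
	while (isspace((unsigned char)*p))	/* strim() also drops trailing blanks */
		p++;
	*val = strtoul(p, &end, base);
	return (end == p) ? -1 : 0;
}

int main(void)
{
	/* a 6-byte, space-padded numeric field as it would sit in the VPD image */
	const char cclk_raw[6] = { ' ', '1', '2', '5', '0', '0' };
	unsigned int cclk;

	if (!fieldtouint(cclk_raw, sizeof(cclk_raw), 10, &cclk))
		printf("cclk field: %u\n", cclk);
	return 0;
}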
+ */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1410,6 +1434,9 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, unsigned int naddr, + const u8 **addr, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, bool add_smt); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1a1f1c89660b..d1e3f0997d6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -338,84 +338,108 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); } +int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ +module_param(dbfifo_int_thresh, int, 0644); +MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); + /* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. + * usecs to sleep while draining the dbfifo */ -static int set_addr_filters(const struct net_device *dev, bool sleep) +static int dbfifo_drain_delay = 1000; +module_param(dbfifo_drain_delay, int, 0644); +MODULE_PARM_DESC(dbfifo_drain_delay, + "usecs to sleep while draining the dbfifo"); + +static inline int cxgb4_set_addr_hash(struct port_info *pi) { + struct adapter *adap = pi->adapter; + u64 vec = 0; + bool ucast = false; + struct hash_mac_addr *entry; + + /* Calculate the hash vector for the updated list and program it */ + list_for_each_entry(entry, &adap->mac_hlist, list) { + ucast |= is_unicast_ether_addr(entry->addr); + vec |= (1ULL << hash_mac_addr(entry->addr)); + } + return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, + vec, false); +} + +static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + int ret; u64 mhash = 0; u64 uhash = 0; - bool free = true; - u16 filt_idx[7]; - const u8 *addr[7]; - int ret, naddr = 0; - const struct netdev_hw_addr *ha; - int uc_cnt = netdev_uc_count(dev); - int mc_cnt = netdev_mc_count(dev); - const struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->pf; + bool free = false; + bool ucast = is_unicast_ether_addr(mac_addr); + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *new_entry; - /* first do the secondary unicast addresses */ - netdev_for_each_uc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &uhash, sleep); - if (ret < 0) - return ret; - - free = false; - naddr = 0; - } + ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist, + NULL, ucast ? 
&uhash : &mhash, false); + if (ret < 0) + goto out; + /* if hash != 0, then add the addr to hash addr list + * so on the end we will calculate the hash for the + * list and program it + */ + if (uhash || mhash) { + new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, mac_addr); + list_add_tail(&new_entry->list, &adap->mac_hlist); + ret = cxgb4_set_addr_hash(pi); } +out: + return ret < 0 ? ret : 0; +} - /* next set up the multicast addresses */ - netdev_for_each_mc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &mhash, sleep); - if (ret < 0) - return ret; +static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + int ret; + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *entry, *tmp; - free = false; - naddr = 0; + /* If the MAC address to be removed is in the hash addr + * list, delete it from the list and update hash vector + */ + list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { + if (ether_addr_equal(entry->addr, mac_addr)) { + list_del(&entry->list); + kfree(entry); + return cxgb4_set_addr_hash(pi); } } - return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, - uhash | mhash, sleep); + ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false); + return ret < 0 ? -EINVAL : 0; } -int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ -module_param(dbfifo_int_thresh, int, 0644); -MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); - -/* - * usecs to sleep while draining the dbfifo - */ -static int dbfifo_drain_delay = 1000; -module_param(dbfifo_drain_delay, int, 0644); -MODULE_PARM_DESC(dbfifo_drain_delay, - "usecs to sleep while draining the dbfifo"); - /* * Set Rx properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged. */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { - int ret; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu, - (dev->flags & IFF_PROMISC) ? 1 : 0, - (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, - sleep_ok); - return ret; + if (!(dev->flags & IFF_PROMISC)) { + __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + if (!(dev->flags & IFF_ALLMULTI)) + __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + } + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 
1 : 0, 1, -1, + sleep_ok); } /** @@ -2725,6 +2749,8 @@ static int cxgb_up(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) update_clip(adap); #endif + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adap->mac_hlist); out: return err; irq_err: diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 3ba4b0cc8094..13b144bcf725 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2229,7 +2229,7 @@ static int process_responses(struct sge_rspq *q, int budget) budget_left--; } - if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) __refill_fl(q->adap, &rxq->fl); return budget - budget_left; } @@ -2615,8 +2615,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | FW_IQ_CMD_FL0CONGCIF_F | FW_IQ_CMD_FL0CONGEN_F); + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. Hence the maximum + * allowed burst size is 448 bytes. For T4/T5, the hardware + * doesn't coalesce fetch requests if more than 64 bytes of + * Free List pointers are provided, so we use a 128-byte Fetch + * Burst Minimum there (T6 implements coalescing so we can use + * the smaller 64-byte value there). + */ c.fl0dcaen_to_fl0cidxfthresh = - htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) | + htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + FETCHBURSTMIN_128B_X : + FETCHBURSTMIN_64B_X) | FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 636b4691f252..cc1736bece0f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4433,23 +4433,6 @@ void t4_intr_disable(struct adapter *adapter) } /** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by HW inexact - * (hash) address matching. - */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - -/** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter * @mbox: mbox to use for the FW command @@ -6738,6 +6721,81 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, } /** + * t4_free_mac_filt - frees exact-match filters of given MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @sleep_ok: call is allowed to sleep + * + * Frees the exact-match filter for each of the supplied addresses + * + * Returns a negative error number or the number of filters freed. + */ +int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, unsigned int naddr, + const u8 **addr, bool sleep_ok) +{ + int offset, ret = 0; + struct fw_vi_mac_cmd c; + unsigned int nfilters = 0; + unsigned int max_naddr = is_t4(adap->params.chip) ? 
+ NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int rem = naddr; + + if (naddr > max_naddr) + return -EINVAL; + + for (offset = 0; offset < (int)naddr ; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) + ? rem + : ARRAY_SIZE(c.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + break; + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (index < max_naddr) + nfilters++; + } + + offset += fw_naddr; + rem -= fw_naddr; + } + + if (ret == 0) + ret = nfilters; + return ret; +} + +/** * t4_change_mac - modifies the exact-match filter for a MAC address * @adap: the adapter * @mbox: mailbox to use for the FW command diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a5641e2f4557..80417fc564d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1216,6 +1216,8 @@ struct cpl_l2t_write_req { #define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S) #define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U) +#define CPL_L2T_VLAN_NONE 0xfff + struct cpl_l2t_write_rpl { union opcode_tid ot; u8 status; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a8dda635456d..06bc2d2e7a73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index a5231fa771db..36cf3073ca37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -65,6 +65,7 @@ #define TIMERREG_COUNTER0_X 0 #define FETCHBURSTMIN_64B_X 2 +#define FETCHBURSTMIN_128B_X 3 #define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 6049f70e110c..4a707c32d76f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -348,6 +348,11 @@ struct sge { #define for_each_ethrxq(sge, iter) \ for (iter = 0; iter < (sge)->ethqsets; iter++) +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + /* * Per-"adapter" (Virtual Function) information. 
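t4_free_mac_filt() above walks the caller's address array in chunks no larger than the number of exact-match slots in one FW_VI_MAC_CMD. A user-space sketch of just that chunking loop is shown below; a batch size of 7 is used for illustration (matching the "up to 7" note in the kerneldoc), while the real bound is ARRAY_SIZE(c.u.exact).

#include <stdio.h>

#define BATCH 7	/* stands in for ARRAY_SIZE(cmd.u.exact) */

int main(void)
{
	unsigned int naddr = 18;	/* hypothetical number of filters to free */
	unsigned int rem = naddr;
	unsigned int offset;

	for (offset = 0; offset < naddr; ) {
		unsigned int fw_naddr = rem < BATCH ? rem : BATCH;

		/* one mailbox command covers addr[offset..offset+fw_naddr-1] */
		printf("cmd covers %u..%u (%u addresses)\n",
		       offset, offset + fw_naddr - 1, fw_naddr);

		offset += fw_naddr;
		rem -= fw_naddr;
	}
	return 0;
}

The same offset/rem pattern appears again in the VF variant, t4vf_free_mac_filt(), later in the series.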
*/ @@ -381,6 +386,9 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + + /* list of MAC addresses in MPS Hash */ + struct list_head mac_hlist; }; enum { /* adapter flags */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0cfa5d72cafd..1cc8a7a69457 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -741,6 +741,9 @@ static int adapter_up(struct adapter *adapter) */ enable_rx(adapter); t4vf_sge_start(adapter); + + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } @@ -787,10 +790,6 @@ static int cxgb4vf_open(struct net_device *dev) /* * Note that this interface is up and start everything up ... */ - netif_set_real_num_tx_queues(dev, pi->nqsets); - err = netif_set_real_num_rx_queues(dev, pi->nqsets); - if (err) - goto err_unwind; err = link_start(dev); if (err) goto err_unwind; @@ -859,97 +858,74 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) return ns; } -/* - * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting - * at a specified offset within the list, into an array of addrss pointers and - * return the number collected. - */ -static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int offset, - unsigned int maxaddrs) +static inline int cxgb4vf_set_addr_hash(struct port_info *pi) { - unsigned int index = 0; - unsigned int naddr = 0; - const struct netdev_hw_addr *ha; - - for_each_dev_addr(dev, ha) - if (index++ >= offset) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } - return naddr; -} - -/* - * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting - * at a specified offset within the list, into an array of addrss pointers and - * return the number collected. - */ -static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int offset, - unsigned int maxaddrs) -{ - unsigned int index = 0; - unsigned int naddr = 0; - const struct netdev_hw_addr *ha; + struct adapter *adapter = pi->adapter; + u64 vec = 0; + bool ucast = false; + struct hash_mac_addr *entry; - netdev_for_each_mc_addr(ha, dev) - if (index++ >= offset) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } - return naddr; + /* Calculate the hash vector for the updated list and program it */ + list_for_each_entry(entry, &adapter->mac_hlist, list) { + ucast |= is_unicast_ether_addr(entry->addr); + vec |= (1ULL << hash_mac_addr(entry->addr)); + } + return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); } -/* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
- */ -static int set_addr_filters(const struct net_device *dev, bool sleep) +static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr) { + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + int ret; u64 mhash = 0; u64 uhash = 0; - bool free = true; - unsigned int offset, naddr; - const u8 *addr[7]; - int ret; - const struct port_info *pi = netdev_priv(dev); - - /* first do the secondary unicast addresses */ - for (offset = 0; ; offset += naddr) { - naddr = collect_netdev_uc_list_addrs(dev, addr, offset, - ARRAY_SIZE(addr)); - if (naddr == 0) - break; - - ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, NULL, &uhash, sleep); - if (ret < 0) - return ret; + bool free = false; + bool ucast = is_unicast_ether_addr(mac_addr); + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *new_entry; - free = false; + ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist, + NULL, ucast ? &uhash : &mhash, false); + if (ret < 0) + goto out; + /* if hash != 0, then add the addr to hash addr list + * so on the end we will calculate the hash for the + * list and program it + */ + if (uhash || mhash) { + new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, mac_addr); + list_add_tail(&new_entry->list, &adapter->mac_hlist); + ret = cxgb4vf_set_addr_hash(pi); } +out: + return ret < 0 ? ret : 0; +} - /* next set up the multicast addresses */ - for (offset = 0; ; offset += naddr) { - naddr = collect_netdev_mc_list_addrs(dev, addr, offset, - ARRAY_SIZE(addr)); - if (naddr == 0) - break; +static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + int ret; + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *entry, *tmp; - ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, NULL, &mhash, sleep); - if (ret < 0) - return ret; - free = false; + /* If the MAC address to be removed is in the hash addr + * list, delete it from the list and update hash vector + */ + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { + if (ether_addr_equal(entry->addr, mac_addr)) { + list_del(&entry->list); + kfree(entry); + return cxgb4vf_set_addr_hash(pi); + } } - return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, - uhash | mhash, sleep); + ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false); + return ret < 0 ? -EINVAL : 0; } /* @@ -958,16 +934,18 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { - int ret; struct port_info *pi = netdev_priv(dev); - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1, - (dev->flags & IFF_PROMISC) != 0, - (dev->flags & IFF_ALLMULTI) != 0, - 1, -1, sleep_ok); - return ret; + if (!(dev->flags & IFF_PROMISC)) { + __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); + if (!(dev->flags & IFF_ALLMULTI)) + __dev_mc_sync(dev, cxgb4vf_mac_sync, + cxgb4vf_mac_unsync); + } + return t4vf_set_rxmode(pi->adapter, pi->viid, -1, + (dev->flags & IFF_PROMISC) != 0, + (dev->flags & IFF_ALLMULTI) != 0, + 1, -1, sleep_ok); } /* @@ -2194,6 +2172,73 @@ static void cleanup_debugfs(struct adapter *adapter) /* nothing to do */ } +/* Figure out how many Ports and Queue Sets we can support. 
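The set_rxmode() conversion above leans on the core __dev_uc_sync()/__dev_mc_sync() helpers, which invoke a per-address sync/unsync pair only for addresses added to or removed from the netdev lists since the previous call, replacing the hand-rolled address collection that is being deleted. A stripped-down sketch of that pattern is below; the foo_* names are hypothetical and the bodies are placeholders, not the driver's implementation.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Called once for each newly added address. */
static int foo_mac_sync(struct net_device *dev, const unsigned char *addr)
{
	/* program one exact-match (or hash) filter for addr here */
	return 0;
}

/* Called once for each removed address. */
static int foo_mac_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* release whatever filter was programmed for addr here */
	return 0;
}

static void foo_set_rx_mode(struct net_device *dev)
{
	if (!(dev->flags & IFF_PROMISC)) {
		__dev_uc_sync(dev, foo_mac_sync, foo_mac_unsync);
		if (!(dev->flags & IFF_ALLMULTI))
			__dev_mc_sync(dev, foo_mac_sync, foo_mac_unsync);
	}
	/* then push the promisc/allmulti state to the hardware */
}

In a real driver such a routine would be wired up through the .ndo_set_rx_mode hook, as cxgb4/cxgb4vf already do for set_rxmode().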
This depends on + * knowing our Virtual Function Resources and may be called a second time if + * we fall back from MSI-X to MSI Interrupt Mode. + */ +static void size_nports_qsets(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + unsigned int ethqsets, pmask_nports; + + /* The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d maximum" + " allowed virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* We may have been provisioned with more VIs than the number of + * ports we're allowed to access (our Port Access Rights Mask). + * This is obviously a configuration conflict but we don't want to + * crash the kernel or anything silly just because of that. + */ + pmask_nports = hweight32(adapter->params.vfres.pmask); + if (pmask_nports < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d provissioned" + " virtual interfaces; limited by Port Access Rights" + " mask %#x\n", pmask_nports, adapter->params.nports, + adapter->params.vfres.pmask); + adapter->params.nports = pmask_nports; + } + + /* We need to reserve an Ingress Queue for the Asynchronous Firmware + * Event Queue. And if we're using MSI Interrupts, we'll also need to + * reserve an Ingress Queue for a Forwarded Interrupts. + * + * The rest of the FL/Intr-capable ingress queues will be matched up + * one-for-one with Ethernet/Control egress queues in order to form + * "Queue Sets" which will be aportioned between the "ports". For + * each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + * + * Note that even if we're currently configured to use MSI-X + * Interrupts (module variable msi == MSI_MSIX) we may get downgraded + * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that + * happens we'll need to adjust things later. + */ + ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI); + if (vfres->nethctrl != ethqsets) + ethqsets = min(vfres->nethctrl, ethqsets); + if (vfres->neq < ethqsets*2) + ethqsets = vfres->neq/2; + if (ethqsets > MAX_ETH_QSETS) + ethqsets = MAX_ETH_QSETS; + adapter->sge.max_ethqsets = ethqsets; + + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } +} + /* * Perform early "adapter" initialization. This is where we discover what * adapter parameters we're going to be using and initialize basic adapter @@ -2201,24 +2246,12 @@ static void cleanup_debugfs(struct adapter *adapter) */ static int adap_init0(struct adapter *adapter) { - struct vf_resources *vfres = &adapter->params.vfres; struct sge_params *sge_params = &adapter->params.sge; struct sge *s = &adapter->sge; - unsigned int ethqsets; int err; u32 param, val = 0; /* - * Wait for the device to become ready before proceeding ... - */ - err = t4vf_wait_dev_ready(adapter); - if (err) { - dev_err(adapter->pdev_dev, "device didn't become ready:" - " err=%d\n", err); - return err; - } - - /* * Some environments do not properly handle PCIE FLRs -- e.g. 
in Linux * 2.6.31 and later we can't call pci_reset_function() in order to * issue an FLR because of a self- deadlock on the device semaphore. @@ -2323,69 +2356,23 @@ static int adap_init0(struct adapter *adapter) return err; } - /* - * The number of "ports" which we support is equal to the number of - * Virtual Interfaces with which we've been provisioned. - */ - adapter->params.nports = vfres->nvi; - if (adapter->params.nports > MAX_NPORTS) { - dev_warn(adapter->pdev_dev, "only using %d of %d allowed" - " virtual interfaces\n", MAX_NPORTS, - adapter->params.nports); - adapter->params.nports = MAX_NPORTS; - } - - /* - * We need to reserve a number of the ingress queues with Free List - * and Interrupt capabilities for special interrupt purposes (like - * asynchronous firmware messages, or forwarded interrupts if we're - * using MSI). The rest of the FL/Intr-capable ingress queues will be - * matched up one-for-one with Ethernet/Control egress queues in order - * to form "Queue Sets" which will be aportioned between the "ports". - * For each Queue Set, we'll need the ability to allocate two Egress - * Contexts -- one for the Ingress Queue Free List and one for the TX - * Ethernet Queue. - */ - ethqsets = vfres->niqflint - INGQ_EXTRAS; - if (vfres->nethctrl != ethqsets) { - dev_warn(adapter->pdev_dev, "unequal number of [available]" - " ingress/egress queues (%d/%d); using minimum for" - " number of Queue Sets\n", ethqsets, vfres->nethctrl); - ethqsets = min(vfres->nethctrl, ethqsets); - } - if (vfres->neq < ethqsets*2) { - dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)" - " to support Queue Sets (%d); reducing allowed Queue" - " Sets\n", vfres->neq, ethqsets); - ethqsets = vfres->neq/2; - } - if (ethqsets > MAX_ETH_QSETS) { - dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue" - " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets); - ethqsets = MAX_ETH_QSETS; - } - if (vfres->niq != 0 || vfres->neq > ethqsets*2) { - dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)" - " ignored\n", vfres->niq, vfres->neq - ethqsets*2); - } - adapter->sge.max_ethqsets = ethqsets; - - /* - * Check for various parameter sanity issues. Most checks simply - * result in us using fewer resources than our provissioning but we - * do need at least one "port" with which to work ... - */ - if (adapter->sge.max_ethqsets < adapter->params.nports) { - dev_warn(adapter->pdev_dev, "only using %d of %d available" - " virtual interfaces (too few Queue Sets)\n", - adapter->sge.max_ethqsets, adapter->params.nports); - adapter->params.nports = adapter->sge.max_ethqsets; + /* Check for various parameter sanity issues */ + if (adapter->params.vfres.pmask == 0) { + dev_err(adapter->pdev_dev, "no port access configured\n" + "usable!\n"); + return -EINVAL; } - if (adapter->params.nports == 0) { + if (adapter->params.vfres.nvi == 0) { dev_err(adapter->pdev_dev, "no virtual interfaces configured/" "usable!\n"); return -EINVAL; } + + /* Initialize nports and max_ethqsets now that we have our Virtual + * Function Resources. + */ + size_nports_qsets(adapter); + return 0; } @@ -2799,6 +2786,40 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } } + /* See what interrupts we'll be using. If we've been configured to + * use MSI-X interrupts, try to enable them but fall back to using + * MSI interrupts if we can't enable MSI-X interrupts. If we can't + * get MSI interrupts we bail with the error. 
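The size_nports_qsets() helper introduced above reduces to a handful of subtract/min/clamp steps on the VF's provisioned resources. A user-space model of that arithmetic is sketched below with made-up resource counts; MAX_ETH_QSETS and the MSI flag are stand-ins for the driver's constants.

#include <stdio.h>

#define MAX_ETH_QSETS 32	/* hypothetical cap, stands in for the driver's constant */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* made-up VF provisioning: FL/intr-capable ingress queues, egress
	 * control queues and egress contexts
	 */
	unsigned int niqflint = 10, nethctrl = 8, neq = 16;
	int using_msi = 1;	/* plain MSI needs a forwarded-interrupt queue too */
	unsigned int ethqsets;

	ethqsets = niqflint - 1 - (using_msi ? 1 : 0);	/* FW event queue (+ fwd intr) */
	ethqsets = min_u(ethqsets, nethctrl);		/* one egress ctrl queue per set */
	if (neq < ethqsets * 2)				/* two egress contexts per set */
		ethqsets = neq / 2;
	ethqsets = min_u(ethqsets, MAX_ETH_QSETS);

	printf("max_ethqsets = %u\n", ethqsets);
	return 0;
}

In the driver the port count is then clamped to this value as well, which is why the probe path can simply re-run size_nports_qsets() after falling back from MSI-X to MSI.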
+ */ + if (msi == MSI_MSIX && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else { + if (msi == MSI_MSIX) { + dev_info(adapter->pdev_dev, + "Unable to use MSI-X Interrupts; falling " + "back to MSI Interrupts\n"); + + /* We're going to need a Forwarded Interrupt Queue so + * that may cut into how many Queue Sets we can + * support. + */ + msi = MSI_MSI; + size_nports_qsets(adapter); + } + err = pci_enable_msi(pdev); + if (err) { + dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;" + " err=%d\n", err); + goto err_free_dev; + } + adapter->flags |= USING_MSI; + } + + /* Now that we know how many "ports" we have and what interrupt + * mechanism we're going to use, we can configure our queue resources. + */ + cfg_queues(adapter); + /* * The "card" is now ready to go. If any errors occur during device * registration we do not fail the whole "card" but rather proceed @@ -2806,10 +2827,14 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * must register at least one net device. */ for_each_port(adapter, pidx) { + struct port_info *pi = netdev_priv(adapter->port[pidx]); netdev = adapter->port[pidx]; if (netdev == NULL) continue; + netif_set_real_num_tx_queues(netdev, pi->nqsets); + netif_set_real_num_rx_queues(netdev, pi->nqsets); + err = register_netdev(netdev); if (err) { dev_warn(&pdev->dev, "cannot register net device %s," @@ -2821,7 +2846,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); - goto err_free_dev; + goto err_disable_interrupts; } /* @@ -2839,32 +2864,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } /* - * See what interrupts we'll be using. If we've been configured to - * use MSI-X interrupts, try to enable them but fall back to using - * MSI interrupts if we can't enable MSI-X interrupts. If we can't - * get MSI interrupts we bail with the error. - */ - if (msi == MSI_MSIX && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else { - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "Unable to allocate %s interrupts;" - " err=%d\n", - msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); - goto err_free_debugfs; - } - adapter->flags |= USING_MSI; - } - - /* - * Now that we know how many "ports" we have and what their types are, - * and how many Queue Sets we can support, we can configure our queue - * resources. - */ - cfg_queues(adapter); - - /* * Print a short notice on the existence and configuration of the new * VF network device ... */ @@ -2884,11 +2883,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Error recovery and exit code. Unwind state that's been created * so far and return the error. */ - -err_free_debugfs: - if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { - cleanup_debugfs(adapter); - debugfs_remove_recursive(adapter->debugfs_root); +err_disable_interrupts: + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; } err_free_dev: diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 6528231d8a59..1ccd282949a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1864,7 +1864,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) * for new buffer pointers, refill the Free List. 
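The refill check that follows tops the Free List back up once enough buffer pointers have been consumed; with this patch the headroom is measured against fl_cap() rather than the raw ring size. A toy model of the condition is shown below, assuming fl_cap() reports the number of buffers the Free List can actually hold; the numbers are made up.

#include <stdio.h>

/* Refill once at least "threshold" buffer slots have been consumed.
 * fl_capacity stands in for fl_cap(&rxq->fl), fl_avail for fl.avail.
 */
static int needs_refill(unsigned int fl_capacity, unsigned int fl_avail,
			unsigned int threshold)
{
	return fl_capacity - fl_avail >= threshold;
}

int main(void)
{
	/* hypothetical: 1024-buffer FL, 1000 still posted, 2 * 8-buffer units */
	printf("refill? %d\n", needs_refill(1024, 1000, 16));
	return 0;
}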
*/ if (rspq->offset >= 0 && - rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) + fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) __refill_fl(rspq->adapter, &rxq->fl); return budget - budget_left; } @@ -2300,9 +2300,20 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | FW_IQ_CMD_FL0PACKEN_F | FW_IQ_CMD_FL0PADEN_F); + + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. Hence the maximum + * allowed burst size is 448 bytes. For T4/T5, the hardware + * doesn't coalesce fetch requests if more than 64 bytes of + * Free List pointers are provided, so we use a 128-byte Fetch + * Burst Minimum there (T6 implements coalescing so we can use + * the smaller 64-byte value there). + */ cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + FETCHBURSTMIN_128B_X : + FETCHBURSTMIN_64B_X) | FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); @@ -2607,7 +2618,6 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; - unsigned int ingpadboundary, ingpackboundary, ingpad_shift; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2619,7 +2629,8 @@ int t4vf_sge_init(struct adapter *adapter) fl0, fl1); return -EINVAL; } - if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) { + if ((sge_params->sge_control & RXPKTCPLMODE_F) != + RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); return -EINVAL; } @@ -2632,41 +2643,7 @@ int t4vf_sge_init(struct adapter *adapter) s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); - - /* T4 uses a single control field to specify both the PCIe Padding and - * Packing Boundary. T5 introduced the ability to specify these - * separately. The actual Ingress Packet Data alignment boundary - * within Packed Buffer Mode is the maximum of these two - * specifications. (Note that it makes no real practical sense to - * have the Pading Boudary be larger than the Packing Boundary but you - * could set the chip up that way and, in fact, legacy T4 code would - * end doing this because it would initialize the Padding Boundary and - * leave the Packing Boundary initialized to 0 (16 bytes).) - * Padding Boundary values in T6 starts from 8B, - * where as it is 32B for T4 and T5. - */ - if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) - ingpad_shift = INGPADBOUNDARY_SHIFT_X; - else - ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; - - ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) + - ingpad_shift); - if (is_t4(adapter->params.chip)) { - s->fl_align = ingpadboundary; - } else { - /* T5 has a different interpretation of one of the PCIe Packing - * Boundary values. - */ - ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); - if (ingpackboundary == INGPACKBOUNDARY_16B_X) - ingpackboundary = 16; - else - ingpackboundary = 1 << (ingpackboundary + - INGPACKBOUNDARY_SHIFT_X); - - s->fl_align = max(ingpadboundary, ingpackboundary); - } + s->fl_align = t4vf_fl_pkt_align(adapter); /* A FL with <= fl_starve_thres buffers is starving and a periodic * timer will attempt to refill it. 
This needs to be larger than the diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 88b8981b4751..9b40a85cc1e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -285,12 +285,31 @@ static inline int is_t4(enum chip_type chip) return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. + */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + int t4vf_wait_dev_ready(struct adapter *); int t4vf_port_init(struct adapter *, int); int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); +int t4vf_fl_pkt_align(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4vf_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, @@ -320,6 +339,8 @@ int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, bool); int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, const u8 **, u16 *, u64 *, bool); +int t4vf_free_mac_filt(struct adapter *, unsigned int, unsigned int naddr, + const u8 **, bool); int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index b6fa74aafe47..fed83d88fc4e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -236,23 +236,6 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return -ETIMEDOUT; } -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by hardware - * inexact (hash) address matching. - */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) @@ -435,6 +418,61 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, } /** + * t4vf_fl_pkt_align - return the fl packet alignment + * @adapter: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. And T6 changes the + * Ingress Padding Boundary Shift, so it's all a mess and it's best + * if we put this in low-level Common Code ... 
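The helper described here (implemented just below) simply takes the larger of the decoded padding and packing boundaries. A user-space model of that decision is sketched next, with the register fields and shift amounts passed in as plain numbers; the values used are examples only, since the real decodes come from the SGE_CONTROL registers, and the "field value 0 means 16 bytes" special case mirrors the INGPACKBOUNDARY_16B_X handling under that assumption.

#include <stdio.h>

/* Model of t4vf_fl_pkt_align(): the FL packet alignment is the maximum of
 * the decoded ingress padding boundary and, on T5 and later, the ingress
 * packing boundary.  Field values and shifts below are illustrative.
 */
static unsigned int fl_pkt_align(unsigned int ingpad_field,
				 unsigned int ingpad_shift,
				 int is_t4_chip,
				 unsigned int ingpack_field,
				 unsigned int ingpack_shift)
{
	unsigned int ingpadboundary = 1u << (ingpad_field + ingpad_shift);
	unsigned int fl_align = ingpadboundary;

	if (!is_t4_chip) {
		/* assumed: field value 0 is special-cased to a 16B boundary */
		unsigned int ingpackboundary = ingpack_field == 0 ?
			16 : 1u << (ingpack_field + ingpack_shift);

		if (ingpackboundary > fl_align)
			fl_align = ingpackboundary;
	}
	return fl_align;
}

int main(void)
{
	/* e.g. padding field 1 with shift 5 -> 64B, packing field 0 -> 16B */
	printf("fl_align = %u\n", fl_pkt_align(1, 5, 0, 0, 5));
	return 0;
}

Moving this into common code lets both the PF and VF drivers share one place that knows about the T4/T5/T6 differences, which is exactly what the t4vf_sge_init() hunk above takes advantage of.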
+ * + */ +int t4vf_fl_pkt_align(struct adapter *adapter) +{ + u32 sge_control, sge_control2; + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + sge_control = adapter->params.sge.sge_control; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adapter->params.chip)) { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = adapter->params.sge.sge_control2; + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +/** * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID @@ -1266,6 +1304,77 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, } /** + * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses + * @adapter: the adapter + * @viid: the VI id + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @sleep_ok: call is allowed to sleep + * + * Frees the exact-match filter for each of the supplied addresses + * + * Returns a negative error number or the number of filters freed. + */ +int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid, + unsigned int naddr, const u8 **addr, bool sleep_ok) +{ + int offset, ret = 0; + struct fw_vi_mac_cmd cmd; + unsigned int nfilters = 0; + unsigned int max_naddr = adapter->params.arch.mps_tcam_size; + unsigned int rem = naddr; + + if (naddr > max_naddr) + return -EINVAL; + + for (offset = 0; offset < (int)naddr ; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ? 
+ rem : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd, + sleep_ok); + if (ret) + break; + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (index < max_naddr) + nfilters++; + } + + offset += fw_naddr; + rem -= fw_naddr; + } + + if (ret == 0) + ret = nfilters; + return ret; +} + +/** * t4vf_change_mac - modifies the exact-match filter for a MAC address * @adapter: the adapter * @viid: the Virtual Interface ID diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 7ba6d530b0c0..130f910e4785 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -201,16 +201,20 @@ static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) } /* wrappers function for kernel log - * Make sure variable vdev of struct vnic_dev is available in the block where - * these macros are used */ -#define vdev_info(args...) dev_info(&vdev->pdev->dev, args) -#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args) -#define vdev_err(args...) dev_err(&vdev->pdev->dev, args) - -#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args) -#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args) -#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args) +#define vdev_err(vdev, fmt, ...) \ + dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) +#define vdev_warn(vdev, fmt, ...) \ + dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) +#define vdev_info(vdev, fmt, ...) \ + dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) + +#define vdev_neterr(vdev, fmt, ...) \ + netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) +#define vdev_netwarn(vdev, fmt, ...) \ + netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) +#define vdev_netinfo(vdev, fmt, ...) 
\ + netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) static inline struct device *enic_get_dev(struct enic *enic) { diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index abeda2a9ea27..9c682aff3834 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -43,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - vdev_err("Failed to hook CQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1fdf5fe12a95..8f27df3207bc 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -53,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - vdev_err("vNIC BAR0 res hdr length error\n"); + vdev_err(vdev, "vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; mrh = bar->vaddr; if (!rh) { - vdev_err("vNIC BAR0 res hdr not mem-mapped\n"); + vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } @@ -69,7 +69,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, (ioread32(&rh->version) != VNIC_RES_VERSION)) { if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { - vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, MGMTVNIC_MAGIC, MGMTVNIC_VERSION, ioread32(&rh->magic), ioread32(&rh->version)); @@ -106,7 +106,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", type, bar_offset, len, bar[bar_num].len); return -EINVAL; @@ -198,7 +198,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - vdev_err("Failed to allocate ring (size=%d), aborting\n", + vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n", (int)ring->size); return -ENOMEM; } @@ -241,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ENODEV; } if (status & STAT_BUSY) { - vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd)); + vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } @@ -275,7 +275,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d devcmd %d\n", + vdev_neterr(vdev, "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } @@ -290,7 +290,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } } - vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd)); + vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } @@ -313,7 +313,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, new_posted = (posted + 1) % DEVCMD2_RING_SIZE; if (new_posted == fetch_index) { - 
vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", + vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", _CMD_N(cmd), fetch_index, posted); return -EBUSY; } @@ -352,7 +352,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, err = result->error; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d devcmd %d\n", + vdev_neterr(vdev, "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } @@ -365,7 +365,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, udelay(100); } - vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd)); + vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd)); return -ETIMEDOUT; } @@ -401,7 +401,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ - vdev_err("Fatal error in devcmd2 init - hardware surprise removal"); + vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n"); return -ENODEV; } @@ -474,8 +474,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d proxy devcmd %d\n", err, - _CMD_N(cmd)); + vdev_neterr(vdev, "Error %d proxy devcmd %d\n", + err, _CMD_N(cmd)); return err; } @@ -768,7 +768,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - vdev_neterr("Can't set packet filter\n"); + vdev_neterr(vdev, "Can't set packet filter\n"); return err; } @@ -785,7 +785,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - vdev_neterr("Can't add addr [%pM], %d\n", addr, err); + vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err); return err; } @@ -802,7 +802,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - vdev_neterr("Can't del addr [%pM], %d\n", addr, err); + vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err); return err; } @@ -846,7 +846,8 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - vdev_neterr("notify block %p still allocated", vdev->notify); + vdev_neterr(vdev, "notify block %p still allocated\n", + vdev->notify); return -EINVAL; } @@ -965,7 +966,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n"); + vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } @@ -1103,16 +1104,16 @@ int vnic_devcmd_init(struct vnic_dev *vdev) if (res) { err = vnic_dev_init_devcmd2(vdev); if (err) - vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1", + vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n", err); else return 0; } else { - vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n"); + vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) 
Using DEVCMD1\n"); } err = vnic_dev_init_devcmd1(vdev); if (err) - vdev_err("DEVCMD1 initialization failed: %d", err); + vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err); return err; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c index 942759d9cb3c..23604e3d4455 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_intr.c +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -40,7 +40,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - vdev_err("Failed to hook INTR[%d].ctrl resource\n", index); + vdev_err(vdev, "Failed to hook INTR[%d].ctrl resource\n", + index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index cce2777dfc41..e572a527b18d 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -92,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - vdev_err("Failed to hook RQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -179,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - vdev_neterr("Failed to disable RQ[%d]\n", rq->index); + vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index 05ad16a7e872..090cc65658a3 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -95,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - vdev_err("Failed to hook WQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -187,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - vdev_neterr("Failed to disable WQ[%d]\n", wq->index); + vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index cf94b72dbacd..48d91941408d 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -128,7 +128,6 @@ struct board_info { struct resource *data_res; struct resource *addr_req; /* resources requested */ struct resource *data_req; - struct resource *irq_res; int irq_wake; @@ -1300,22 +1299,16 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); - unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); - /* If there is no IRQ type specified, default to something that - * may work, and tell the user that this is a problem */ - - if (irqflags == IRQF_TRIGGER_NONE) - irqflags = irq_get_trigger_type(dev->irq); - - if (irqflags == IRQF_TRIGGER_NONE) + /* If there is no IRQ type specified, tell the user that this is a + * problem + */ + if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); - irqflags |= IRQF_SHARED; - /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by 
DM9000B */ @@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, + dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts @@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev) db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (db->addr_res == NULL || db->data_res == NULL || - db->irq_res == NULL) { - dev_err(db->dev, "insufficient resources\n"); + if (!db->addr_res || !db->data_res) { + dev_err(db->dev, "insufficient resources addr=%p data=%p\n", + db->addr_res, db->data_res); ret = -ENOENT; goto out; } + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + dev_err(db->dev, "interrupt resource unavailable: %d\n", + ndev->irq); + ret = ndev->irq; + goto out; + } + db->irq_wake = platform_get_irq(pdev, 1); if (db->irq_wake >= 0) { dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); @@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev) /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->io_addr; - ndev->irq = db->irq_res->start; /* ensure at least we have a default set of IO routines */ dm9000_set_io(db, iosize); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index b553409e04ad..94d0eebef129 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -505,9 +505,7 @@ media_picked: tp->timer.expires = RUN_AT(next_tick); add_timer(&tp->timer); #ifdef CONFIG_TULIP_NAPI - init_timer(&tp->oom_timer); - tp->oom_timer.data = (unsigned long)dev; - tp->oom_timer.function = oom_timer; + setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev); #endif } @@ -782,9 +780,8 @@ static void tulip_down (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); - init_timer(&tp->timer); - tp->timer.data = (unsigned long)dev; - tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, + (unsigned long)dev); dev->if_port = tp->saved_if_port; @@ -1475,9 +1472,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->csr0 = csr0; spin_lock_init(&tp->lock); spin_lock_init(&tp->mii_lock); - init_timer(&tp->timer); - tp->timer.data = (unsigned long)dev; - tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, + (unsigned long)dev); INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index cf837831304b..fe3763df3f13 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -72,6 +72,9 @@ #define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \ (ETH_HLEN + ETH_FCS_LEN)) +/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */ +#define BE_MAX_GSO_SIZE (65535 - 2 * VLAN_HLEN) + #define BE_NUM_VLANS_SUPPORTED 64 #define BE_MAX_EQD 128u #define BE_MAX_TX_FRAG_COUNT 30 @@ -89,6 +92,10 @@ #define BE3_MAX_TX_QS 16 #define BE3_MAX_EVT_QS 16 #define BE3_SRIOV_MAX_EVT_QS 8 +#define SH_VF_MAX_NIC_EQS 3 /* Skyhawk VFs can have a max of 4 EQs + * and at 
least 1 is granted to either + * SURF/DPDK + */ #define MAX_RSS_IFACES 15 #define MAX_RX_QS 32 @@ -111,6 +118,8 @@ #define RSS_INDIR_TABLE_LEN 128 #define RSS_HASH_KEY_LEN 40 +#define BE_UNKNOWN_PHY_STATE 0xFF + struct be_dma_mem { void *va; dma_addr_t dma; @@ -118,27 +127,27 @@ struct be_dma_mem { }; struct be_queue_info { + u32 len; + u32 entry_size; /* Size of an element in the queue */ + u32 tail, head; + atomic_t used; /* Number of valid elements in the queue */ + u32 id; struct be_dma_mem dma_mem; - u16 len; - u16 entry_size; /* Size of an element in the queue */ - u16 id; - u16 tail, head; bool created; - atomic_t used; /* Number of valid elements in the queue */ }; -static inline u32 MODULO(u16 val, u16 limit) +static inline u32 MODULO(u32 val, u32 limit) { BUG_ON(limit & (limit - 1)); return val & (limit - 1); } -static inline void index_adv(u16 *index, u16 val, u16 limit) +static inline void index_adv(u32 *index, u32 val, u32 limit) { *index = MODULO((*index + val), limit); } -static inline void index_inc(u16 *index, u16 limit) +static inline void index_inc(u32 *index, u32 limit) { *index = MODULO((*index + 1), limit); } @@ -163,7 +172,7 @@ static inline void queue_head_inc(struct be_queue_info *q) index_inc(&q->head, q->len); } -static inline void index_dec(u16 *index, u16 limit) +static inline void index_dec(u32 *index, u32 limit) { *index = MODULO((*index - 1), limit); } @@ -386,13 +395,17 @@ enum vf_state { #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7) #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) #define BE_FLAGS_SETUP_DONE BIT(9) -#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_PHY_MISCONFIGURED BIT(10) #define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) #define BE_FLAGS_OS2BMC BIT(12) #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 +#define MAX_ERR_RECOVERY_RETRY_COUNT 3 +#define ERR_DETECTION_DELAY 1000 +#define ERR_RECOVERY_RETRY_DELAY 30000 + /* Ethtool set_dump flags */ #define LANCER_INITIATE_FW_DUMP 0x1 #define LANCER_DELETE_FW_DUMP 0x2 @@ -530,7 +543,9 @@ struct be_adapter { u16 work_counter; struct delayed_work be_err_detection_work; + u8 recovery_retries; u8 err_flags; + bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */ u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ @@ -594,6 +609,7 @@ struct be_adapter { u32 bmc_filt_mask; u32 fat_dump_len; u16 serial_num[CNTL_SERIAL_NUM_WORDS]; + u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */ }; #define be_physfn(adapter) (!adapter->virtfn) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index b63d8ad2e115..22402db275f2 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -19,19 +19,25 @@ #include "be.h" #include "be_cmds.h" -static char *be_port_misconfig_evt_desc[] = { - "A valid SFP module detected", - "Optics faulted/ incorrectly installed/ not installed.", - "Optics of two types installed.", - "Incompatible optics.", - "Unknown port SFP status" +char *be_misconfig_evt_port_state[] = { + "Physical Link is functional", + "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.", + "Optics of two types installed – Remove one optic or install matching pair of optics.", + "Incompatible optics – Replace with compatible optics for card to function.", + "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.", + "Uncertified optics – Replace with Avago-certified optics to enable link operation." 
}; -static char *be_port_misconfig_remedy_desc[] = { - "", - "Reseat optics. If issue not resolved, replace", - "Remove one optic or install matching pair of optics", - "Replace with compatible optics for card to function", +static char *be_port_misconfig_evt_severity[] = { + "KERN_WARN", + "KERN_INFO", + "KERN_ERR", + "KERN_WARN" +}; + +static char *phy_state_oper_desc[] = { + "Link is non-operational", + "Link is operational", "" }; @@ -65,7 +71,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_COMMON, BE_PRIV_LNKMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG | BE_PRIV_DEVSEC - } + }, + { + OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -236,7 +257,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (base_status != MCC_STATUS_SUCCESS && !be_skip_err_log(opcode, base_status, addl_status)) { - if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { + if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST || + addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) { dev_warn(&adapter->pdev->dev, "VF is not privileged to issue opcode %d-%d\n", opcode, subsystem); @@ -281,22 +303,56 @@ static void be_async_port_misconfig_event_process(struct be_adapter *adapter, { struct be_async_event_misconfig_port *evt = (struct be_async_event_misconfig_port *)compl; - u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2); + u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE; struct device *dev = &adapter->pdev->dev; - u8 port_misconfig_evt; + u8 msg_severity = DEFAULT_MSG_SEVERITY; + u8 phy_state_info; + u8 new_phy_state; + + new_phy_state = + (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff; + + if (new_phy_state == adapter->phy_state) + return; + + adapter->phy_state = new_phy_state; + + /* for older fw that doesn't populate link effect data */ + if (!sfp_misconfig_evt_word2) + goto log_message; - port_misconfig_evt = - ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff); + phy_state_info = + (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff; + if (phy_state_info & PHY_STATE_INFO_VALID) { + msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1; + + if (be_phy_unqualified(new_phy_state)) + phy_oper_state = (phy_state_info & PHY_STATE_OPER); + } + +log_message: /* Log an error message that would allow a user to determine * whether the SFPs have an issue */ - dev_info(dev, "Port %c: %s %s", adapter->port_name, - be_port_misconfig_evt_desc[port_misconfig_evt], - be_port_misconfig_remedy_desc[port_misconfig_evt]); + if (be_phy_state_unknown(new_phy_state)) + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: Unrecognized Optics state: 0x%x. %s", + adapter->port_name, + new_phy_state, + phy_state_oper_desc[phy_oper_state]); + else + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: %s %s", + adapter->port_name, + be_misconfig_evt_port_state[new_phy_state], + phy_state_oper_desc[phy_oper_state]); - if (port_misconfig_evt == INCOMPATIBLE_SFP) - adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP; + /* Log Vendor name and part no. 
if a misconfigured SFP is detected */ + if (be_phy_misconfigured(new_phy_state)) + adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED; } /* Grp5 CoS Priority evt */ @@ -540,7 +596,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) int status; struct be_mcc_wrb *wrb; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - u16 index = mcc_obj->q.head; + u32 index = mcc_obj->q.head; struct be_cmd_resp_hdr *resp; index_dec(&index, mcc_obj->q.len); @@ -1497,34 +1553,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, return status; } -/* Uses MCCQ */ +/* Uses MCCQ if available else MBOX */ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_if_destroy *req; int status; if (interface_id == -1) return 0; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, - sizeof(*req), wrb, NULL); + sizeof(*req), &wrb, NULL); req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); - status = be_mcc_notify_wait(adapter); -err: - spin_unlock_bh(&adapter->mcc_lock); + status = be_cmd_notify_wait(adapter, &wrb); return status; } @@ -3168,6 +3215,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_set_lmode *req; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3213,6 +3264,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, struct be_cmd_resp_loopback_test *resp; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3259,6 +3314,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, int status; int i, j = 0; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 241819b36ca7..d8540ae95e5a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -68,7 +68,8 @@ enum mcc_addl_status { MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a, MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab, MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56, - MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57 + MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57, + MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60 }; #define CQE_BASE_STATUS_MASK 0xFFFF @@ -175,10 +176,53 @@ struct be_async_event_qnq { u32 flags; } __packed; -#define INCOMPATIBLE_SFP 0x3 +enum { + BE_PHY_FUNCTIONAL = 0, + BE_PHY_NOT_PRESENT = 1, + BE_PHY_DIFF_MEDIA = 2, + BE_PHY_INCOMPATIBLE = 3, + BE_PHY_UNQUALIFIED = 4, + BE_PHY_UNCERTIFIED = 5 +}; + +#define PHY_STATE_MSG_SEVERITY 0x6 +#define PHY_STATE_OPER 0x1 +#define PHY_STATE_INFO_VALID 0x80 +#define PHY_STATE_OPER_MSG_NONE 0x2 +#define DEFAULT_MSG_SEVERITY 0x1 + +#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED) +#define be_phy_unqualified(phy_state) \ + (phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) +#define 
be_phy_misconfigured(phy_state) \ + (phy_state == BE_PHY_INCOMPATIBLE || \ + phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) + +extern char *be_misconfig_evt_port_state[]; + /* async event indicating misconfigured port */ struct be_async_event_misconfig_port { + /* DATA_WORD1: + * phy state of port 0: bits 7 - 0 + * phy state of port 1: bits 15 - 8 + * phy state of port 2: bits 23 - 16 + * phy state of port 3: bits 31 - 24 + */ u32 event_data_word1; + /* DATA_WORD2: + * phy state info of port 0: bits 7 - 0 + * phy state info of port 1: bits 15 - 8 + * phy state info of port 2: bits 23 - 16 + * phy state info of port 3: bits 31 - 24 + * + * PHY STATE INFO: + * Link operability :bit 0 + * Message severity :bit 2 - 1 + * Rsvd :bits 6 - 3 + * phy state info valid :bit 7 + */ u32 event_data_word2; u32 rsvd0; u32 flags; @@ -622,10 +666,13 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) -#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) +#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS | \ + BE_IF_FLAGS_UNTAGGED) -#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) +#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_ALL_PROMISCUOUS) /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index a19ac441336f..2ff691636dac 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; switch (state) { case ETHTOOL_ID_ACTIVE: - be_cmd_get_beacon_state(adapter, adapter->hba_port_num, - &adapter->beacon_state); - return 1; /* cycle on/off once per second */ + status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num, + &adapter->beacon_state); + if (status) + return be_cmd_status(status); + return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_ENABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_ENABLED); break; case ETHTOOL_ID_OFF: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_DISABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_DISABLED); break; case ETHTOOL_ID_INACTIVE: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - adapter->beacon_state); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, adapter->beacon_state); } - return 0; + return be_cmd_status(status); } static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f99de3657ce3..536686476369 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; +#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \ + BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS) + static void 
be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; @@ -849,9 +854,9 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, } /* Grab a WRB header for xmit */ -static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo) { - u16 head = txo->q.head; + u32 head = txo->q.head; queue_head_inc(&txo->q); return head; @@ -895,7 +900,7 @@ static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. */ static void be_xmit_restore(struct be_adapter *adapter, - struct be_tx_obj *txo, u16 head, bool map_single, + struct be_tx_obj *txo, u32 head, bool map_single, u32 copied) { struct device *dev; @@ -930,7 +935,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, struct device *dev = &adapter->pdev->dev; struct be_queue_info *txq = &txo->q; bool map_single = false; - u16 head = txq->head; + u32 head = txq->head; dma_addr_t busaddr; int len; @@ -1123,6 +1128,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { + int err; + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein * packets that are 32b or less may cause a transmit stall * on that port. The workaround is to pad such packets @@ -1139,6 +1146,13 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, return NULL; } + /* The stack can send us skbs with length greater than + * what the HW can handle. Trim the extra bytes. + */ + WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE); + err = pskb_trim(skb, BE_MAX_GSO_SIZE); + WARN_ON(err); + return skb; } @@ -1463,6 +1477,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) if (lancer_chip(adapter) && vid == 0) return 0; + if (!test_bit(vid, adapter->vids)) + return 0; + clear_bit(vid, adapter->vids); adapter->vlans_added--; @@ -1914,8 +1931,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo) if (!aic->enable) return 0; - if (time_before_eq(now, aic->jiffies) || - jiffies_to_msecs(now - aic->jiffies) < 1) + if (jiffies_to_msecs(now - aic->jiffies) < 1) eqd = aic->prev_eqd; else eqd = be_get_new_eqd(eqo); @@ -1988,7 +2004,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *rx_page_info; struct be_queue_info *rxq = &rxo->q; - u16 frag_idx = rxq->tail; + u32 frag_idx = rxq->tail; rx_page_info = &rxo->page_info_tbl[frag_idx]; BUG_ON(!rx_page_info->page); @@ -2399,10 +2415,11 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, { struct sk_buff **sent_skbs = txo->sent_skb_list; struct be_queue_info *txq = &txo->q; - u16 frag_index, num_wrbs = 0; struct sk_buff *skb = NULL; bool unmap_skb_hdr = false; struct be_eth_wrb *wrb; + u16 num_wrbs = 0; + u32 frag_index; do { if (sent_skbs[txq->tail]) { @@ -2514,10 +2531,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) static void be_tx_compl_clean(struct be_adapter *adapter) { - u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct device *dev = &adapter->pdev->dev; + u16 cmpl = 0, timeo = 0, num_wrbs = 0; struct be_tx_compl_info *txcp; struct be_queue_info *txq; + u32 end_idx, notified_idx; struct be_tx_obj *txo; int i, pending_txqs; @@ -3363,6 +3381,7 @@ done: static void be_rx_qs_destroy(struct be_adapter *adapter) { + struct rss_info *rss = &adapter->rss_info; struct 
be_queue_info *q; struct be_rx_obj *rxo; int i; @@ -3389,6 +3408,12 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) } be_queue_free(adapter, q); } + + if (rss->rss_flags) { + rss->rss_flags = RSS_ENABLE_NONE; + be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + 128, rss->rss_hkey); + } } static void be_disable_if_filters(struct be_adapter *adapter) @@ -3509,20 +3534,21 @@ static int be_rx_qs_create(struct be_adapter *adapter) if (!BEx_chip(adapter)) rss->rss_flags |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6; + + netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); + rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + RSS_INDIR_TABLE_LEN, rss_key); + if (rc) { + rss->rss_flags = RSS_ENABLE_NONE; + return rc; + } + + memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); } else { /* Disable RSS, if only default RX Q is created */ rss->rss_flags = RSS_ENABLE_NONE; } - netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); - rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, - RSS_INDIR_TABLE_LEN, rss_key); - if (rc) { - rss->rss_flags = RSS_ENABLE_NONE; - return rc; - } - - memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); /* Post 1 less than RXQ-len to avoid head being equal to tail, * which is a queue empty condition @@ -3537,7 +3563,7 @@ static int be_enable_if_filters(struct be_adapter *adapter) { int status; - status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON); if (status) return status; @@ -3789,18 +3815,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) struct be_resources res = adapter->pool_res; u16 num_vf_qs = 1; - /* Distribute the queue resources equally among the PF and it's VFs + /* Distribute the queue resources among the PF and it's VFs * Do not distribute queue resources in multi-channel configuration. */ if (num_vfs && !be_is_mc(adapter)) { - /* If number of VFs requested is 8 less than max supported, - * assign 8 queue pairs to the PF and divide the remaining - * resources evenly among the VFs - */ - if (num_vfs < (be_max_vfs(adapter) - 8)) - num_vf_qs = (res.max_rss_qs - 8) / num_vfs; - else - num_vf_qs = res.max_rss_qs / num_vfs; + /* Divide the qpairs evenly among the VFs and the PF, capped + * at VF-EQ-count. Any remainder qpairs belong to the PF. + */ + num_vf_qs = min(SH_VF_MAX_NIC_EQS, + res.max_rss_qs / (num_vfs + 1)); /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable * interfaces per port. 
Provide RSS on VFs, only if number @@ -3857,8 +3880,7 @@ static int be_vfs_if_create(struct be_adapter *adapter) int status; /* If a FW profile exists, then cap_flags are updated */ - cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; + cap_flags = BE_VF_IF_EN_FLAGS; for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { @@ -3874,10 +3896,8 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | - BE_IF_FLAGS_PASS_L3L4_ERRORS); + /* PF should enable IF flags during proxy if_create call */ + en_flags = cap_flags & BE_VF_IF_EN_FLAGS; status = be_cmd_if_create(adapter, cap_flags, en_flags, &vf_cfg->if_handle, vf + 1); if (status) @@ -4082,6 +4102,7 @@ static void be_setup_init(struct be_adapter *adapter) adapter->if_handle = -1; adapter->be3_native = false; adapter->if_flags = 0; + adapter->phy_state = BE_UNKNOWN_PHY_STATE; if (be_physfn(adapter)) adapter->cmd_privileges = MAX_PRIVILEGES; else @@ -4265,10 +4286,10 @@ static void be_schedule_worker(struct be_adapter *adapter) adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } -static void be_schedule_err_detection(struct be_adapter *adapter) +static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay) { schedule_delayed_work(&adapter->be_err_detection_work, - msecs_to_jiffies(1000)); + msecs_to_jiffies(delay)); adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; } @@ -4307,6 +4328,23 @@ err: return status; } +static int be_if_create(struct be_adapter *adapter) +{ + u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + u32 cap_flags = be_if_cap_flags(adapter); + int status; + + if (adapter->cfg_num_qs == 1) + cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); + + en_flags &= cap_flags; + /* will enable all the needed filter flags in be_open() */ + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); + + return status; +} + int be_update_queues(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -4324,6 +4362,9 @@ int be_update_queues(struct be_adapter *adapter) be_msix_disable(adapter); be_clear_queues(adapter); + status = be_cmd_if_destroy(adapter, adapter->if_handle, 0); + if (status) + return status; if (!msix_enabled(adapter)) { status = be_msix_enable(adapter); @@ -4331,6 +4372,10 @@ int be_update_queues(struct be_adapter *adapter) return status; } + status = be_if_create(adapter); + if (status) + return status; + status = be_setup_queues(adapter); if (status) return status; @@ -4395,7 +4440,6 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 en_flags; int status; status = be_func_init(adapter); @@ -4428,10 +4472,7 @@ static int be_setup(struct be_adapter *adapter) goto err; /* will enable all the needed filter flags in be_open() */ - en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - en_flags = en_flags & be_if_cap_flags(adapter); - status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, - &adapter->if_handle, 0); + status = be_if_create(adapter); if (status) goto err; @@ -4589,6 +4630,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { + /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */ + if 
(!pci_sriov_get_totalvfs(adapter->pdev)) + return 0; hsw_mode = PORT_FWD_TYPE_VEB; } else { status = be_cmd_get_hsw_config(adapter, NULL, 0, @@ -4804,7 +4848,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; - if (be_multi_rxq(adapter)) + if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) netdev->hw_features |= NETIF_F_RXHASH; netdev->features |= netdev->hw_features | @@ -4817,7 +4861,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->flags |= IFF_MULTICAST; - netif_set_gso_max_size(netdev, 65535 - ETH_HLEN); + netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN); netdev->netdev_ops = &be_netdev_ops; @@ -4859,21 +4903,27 @@ static int be_resume(struct be_adapter *adapter) static int be_err_recover(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; int status; + /* Error recovery is supported only Lancer as of now */ + if (!lancer_chip(adapter)) + return -EIO; + + /* Wait for adapter to reach quiescent state before + * destroying queues + */ + status = be_fw_wait_ready(adapter); + if (status) + goto err; + + be_cleanup(adapter); + status = be_resume(adapter); if (status) goto err; - dev_info(dev, "Adapter recovery successful\n"); return 0; err: - if (be_physfn(adapter)) - dev_err(dev, "Adapter recovery failed\n"); - else - dev_err(dev, "Re-trying adapter recovery\n"); - return status; } @@ -4882,21 +4932,43 @@ static void be_err_detection_task(struct work_struct *work) struct be_adapter *adapter = container_of(work, struct be_adapter, be_err_detection_work.work); - int status = 0; + struct device *dev = &adapter->pdev->dev; + int recovery_status; + int delay = ERR_DETECTION_DELAY; be_detect_error(adapter); - if (be_check_error(adapter, BE_ERROR_HW)) { - be_cleanup(adapter); - - /* As of now error recovery support is in Lancer only */ - if (lancer_chip(adapter)) - status = be_err_recover(adapter); + if (be_check_error(adapter, BE_ERROR_HW)) + recovery_status = be_err_recover(adapter); + else + goto reschedule_task; + + if (!recovery_status) { + adapter->recovery_retries = 0; + dev_info(dev, "Adapter recovery successful\n"); + goto reschedule_task; + } else if (be_virtfn(adapter)) { + /* For VFs, check if PF have allocated resources + * every second. + */ + dev_err(dev, "Re-trying adapter recovery\n"); + goto reschedule_task; + } else if (adapter->recovery_retries++ < + MAX_ERR_RECOVERY_RETRY_COUNT) { + /* In case of another error during recovery, it takes 30 sec + * for adapter to come out of error. Retry error recovery after + * this time interval. 
+ */ + dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n"); + delay = ERR_RECOVERY_RETRY_DELAY; + goto reschedule_task; + } else { + dev_err(dev, "Adapter recovery failed\n"); } - /* Always attempt recovery on VFs */ - if (!status || be_virtfn(adapter)) - be_schedule_err_detection(adapter); + return; +reschedule_task: + be_schedule_err_detection(adapter, delay); } static void be_log_sfp_info(struct be_adapter *adapter) @@ -4906,11 +4978,13 @@ static void be_log_sfp_info(struct be_adapter *adapter) status = be_cmd_query_sfp_info(adapter); if (!status) { dev_err(&adapter->pdev->dev, - "Unqualified SFP+ detected on %c from %s part no: %s", - adapter->port_name, adapter->phy.vendor_name, + "Port %c: %s Vendor: %s part no: %s", + adapter->port_name, + be_misconfig_evt_port_state[adapter->phy_state], + adapter->phy.vendor_name, adapter->phy.vendor_pn); } - adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; + adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED; } static void be_worker(struct work_struct *work) @@ -4954,7 +5028,7 @@ static void be_worker(struct work_struct *work) if (!skyhawk_chip(adapter)) be_eqd_update(adapter, false); - if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED) be_log_sfp_info(adapter); reschedule: @@ -4968,6 +5042,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); + if (adapter->pcicfg && adapter->pcicfg_mapped) + pci_iounmap(adapter->pdev, adapter->pcicfg); } static int db_bar(struct be_adapter *adapter) @@ -5019,8 +5095,10 @@ static int be_map_pci_bars(struct be_adapter *adapter) if (!addr) goto pci_map_err; adapter->pcicfg = addr; + adapter->pcicfg_mapped = true; } else { adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + adapter->pcicfg_mapped = false; } } @@ -5292,7 +5370,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) be_roce_dev_add(adapter); - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); /* On Die temperature not supported for VF. 
*/ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) { @@ -5359,7 +5437,7 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5395,6 +5473,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, dev_err(&adapter->pdev->dev, "EEH error detected\n"); + be_roce_dev_remove(adapter); + if (!be_check_error(adapter, BE_ERROR_EEH)) { be_set_error(adapter, BE_ERROR_EEH); @@ -5459,7 +5539,9 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; - be_schedule_err_detection(adapter); + be_roce_dev_add(adapter); + + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); return; err: dev_err(&adapter->pdev->dev, "EEH resume failed\n"); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 62fa136554ac..41b010645100 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev) if (priv->mdio) { mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } if (priv->clk) diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index b1026689b78f..1f23845a0694 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -43,20 +43,21 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32)); /* In case dst is not aligned we need an intermediate buffer */ - if (dst_is_aligned) - for (i = 0; i < len; i++, reg++) - *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + if (dst_is_aligned) { + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len); + reg += len; + } else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - put_unaligned(buf, reg); + put_unaligned_be32(buf, reg); } } - /* copy last bytes (if any) */ if (last) { - u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - memcpy((u8*)reg, &buf, last); + u32 buf; + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1); + memcpy((u8 *)reg, &buf, last); } } @@ -66,26 +67,28 @@ static u32 nps_enet_rx_handler(struct net_device *ndev) u32 work_done = 0; struct nps_enet_priv *priv = netdev_priv(ndev); struct sk_buff *skb; - struct nps_enet_rx_ctl rx_ctrl; + u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); + u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; + u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT; + u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT; - rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - frame_len = rx_ctrl.nr; + frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT; /* Check if we got RX */ - if (!rx_ctrl.cr) + if (!rx_ctrl_cr) return work_done; /* If we got here there is a work for us */ work_done++; /* Check Rx error */ - if (rx_ctrl.er) { + if (rx_ctrl_er) { ndev->stats.rx_errors++; err = 1; } /* Check Rx CRC error */ - if (rx_ctrl.crc) { + if (rx_ctrl_crc) { ndev->stats.rx_crc_errors++; ndev->stats.rx_dropped++; err = 1; @@ -136,23 +139,24 @@ rx_irq_frame_done: static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_tx_ctl tx_ctrl; - - tx_ctrl.value = nps_enet_reg_get(priv, 
NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; + u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; /* Check if we got TX */ - if (!priv->tx_packet_sent || tx_ctrl.ct) + if (!priv->tx_packet_sent || tx_ctrl_ct) return; /* Ack Tx ctrl register */ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0); /* Check Tx transmit error */ - if (unlikely(tx_ctrl.et)) { + if (unlikely(tx_ctrl_et)) { ndev->stats.tx_errors++; } else { ndev->stats.tx_packets++; - ndev->stats.tx_bytes += tx_ctrl.nt; + ndev->stats.tx_bytes += tx_ctrl_nt; } dev_kfree_skb(priv->tx_skb); @@ -178,13 +182,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { - struct nps_enet_buf_int_enable buf_int_enable; + u32 buf_int_enable_value = 0; napi_complete(napi); - buf_int_enable.rx_rdy = NPS_ENET_ENABLE; - buf_int_enable.tx_done = NPS_ENET_ENABLE; + + /* set tx_done and rx_rdy bits */ + buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; + buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; + nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, - buf_int_enable.value); + buf_int_enable_value); } return work_done; @@ -205,13 +212,12 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) { struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_rx_ctl rx_ctrl; - struct nps_enet_tx_ctl tx_ctrl; - - rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; - if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr) + if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@ -223,22 +229,24 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) static void nps_enet_set_hw_mac_address(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1; - struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2; + u32 ge_mac_cfg_1_value = 0; + u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value; /* set MAC address in HW */ - ge_mac_cfg_1.octet_0 = ndev->dev_addr[0]; - ge_mac_cfg_1.octet_1 = ndev->dev_addr[1]; - ge_mac_cfg_1.octet_2 = ndev->dev_addr[2]; - ge_mac_cfg_1.octet_3 = ndev->dev_addr[3]; - ge_mac_cfg_2->octet_4 = ndev->dev_addr[4]; - ge_mac_cfg_2->octet_5 = ndev->dev_addr[5]; + ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK) + | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK) + | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1, - 
ge_mac_cfg_1.value); + ge_mac_cfg_1_value); nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, - ge_mac_cfg_2->value); + *ge_mac_cfg_2_value); } /** @@ -254,93 +262,97 @@ static void nps_enet_set_hw_mac_address(struct net_device *ndev) static void nps_enet_hw_reset(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_rst ge_rst; - struct nps_enet_phase_fifo_ctl phase_fifo_ctl; + u32 ge_rst_value = 0, phase_fifo_ctl_value = 0; - ge_rst.value = 0; - phase_fifo_ctl.value = 0; /* Pcs reset sequence*/ - ge_rst.gmac_0 = NPS_ENET_ENABLE; - nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value); + ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT; + nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value); usleep_range(10, 20); - ge_rst.value = 0; - nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value); + nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value); /* Tx fifo reset sequence */ - phase_fifo_ctl.rst = NPS_ENET_ENABLE; - phase_fifo_ctl.init = NPS_ENET_ENABLE; + phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT; + phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL, - phase_fifo_ctl.value); + phase_fifo_ctl_value); usleep_range(10, 20); - phase_fifo_ctl.value = 0; + phase_fifo_ctl_value = 0; nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL, - phase_fifo_ctl.value); + phase_fifo_ctl_value); } static void nps_enet_hw_enable_control(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0; - struct nps_enet_buf_int_enable buf_int_enable; - struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2; - struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3; + u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0; + u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value; + u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value; s32 max_frame_length; - ge_mac_cfg_0.value = 0; - buf_int_enable.value = 0; /* Enable Rx and Tx statistics */ - ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK) + | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT; /* Discard packets with different MAC address */ - ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; /* Discard multicast packets */ - ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, - ge_mac_cfg_2->value); + *ge_mac_cfg_2_value); /* Discard Packets bigger than max frame length */ max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN; - if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) - ge_mac_cfg_3->max_len = max_frame_length; + if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) { + *ge_mac_cfg_3_value = + (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK) + | max_frame_length << CFG_3_MAX_LEN_SHIFT; + } /* Enable interrupts */ - buf_int_enable.rx_rdy = NPS_ENET_ENABLE; - buf_int_enable.tx_done = NPS_ENET_ENABLE; + buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; + buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, - buf_int_enable.value); + buf_int_enable_value); /* Write device MAC address to HW */ nps_enet_set_hw_mac_address(ndev); /* Rx and Tx HW features 
*/ - ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT; /* IFG configuration */ - ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG; - ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT; /* preamble configuration */ - ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT; /* enable flow control frames */ - ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR; - ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT; + *ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK) + | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT; /* Enable Rx and Tx */ - ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3, - ge_mac_cfg_3->value); + *ge_mac_cfg_3_value); nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, - ge_mac_cfg_0.value); + ge_mac_cfg_0_value); } static void nps_enet_hw_disable_control(struct net_device *ndev) @@ -358,31 +370,28 @@ static void nps_enet_send_frame(struct net_device *ndev, struct sk_buff *skb) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_tx_ctl tx_ctrl; + u32 tx_ctrl_value = 0; short length = skb->len; u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); u32 *src = (void *)skb->data; bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); - tx_ctrl.value = 0; /* In case src is not aligned we need an intermediate buffer */ if (src_is_aligned) - for (i = 0; i < len; i++, src++) - nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); + iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len); else /* !src_is_aligned */ for (i = 0; i < len; i++, src++) nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, - get_unaligned(src)); + get_unaligned_be32(src)); /* Write the length of the Frame */ - tx_ctrl.nt = length; + tx_ctrl_value |= length << TX_CTL_NT_SHIFT; /* Indicate SW is done */ priv->tx_packet_sent = true; - tx_ctrl.ct = NPS_ENET_ENABLE; - + tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; /* Send Frame */ - nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value); + nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); } /** @@ -422,19 +431,23 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p) static void nps_enet_set_rx_mode(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2; - - ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value; + u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value; if (ndev->flags & IFF_PROMISC) { - ge_mac_cfg_2.disc_da 
= NPS_ENET_DISABLE; - ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT; + } else { - ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE; - ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; + } - nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value); + nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value); } /** @@ -453,12 +466,15 @@ static s32 nps_enet_open(struct net_device *ndev) /* Reset private variables */ priv->tx_packet_sent = false; - priv->ge_mac_cfg_2.value = 0; - priv->ge_mac_cfg_3.value = 0; + priv->ge_mac_cfg_2_value = 0; + priv->ge_mac_cfg_3_value = 0; /* ge_mac_cfg_3 default values */ - priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH; - priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN; + priv->ge_mac_cfg_3_value |= + NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT; + + priv->ge_mac_cfg_3_value |= + NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT; /* Disable HW device */ nps_enet_hw_disable_control(ndev); diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index 6703674d679c..d0cab600bce8 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -43,233 +43,123 @@ #define NPS_ENET_REG_GE_RST 0x1400 #define NPS_ENET_REG_PHASE_FIFO_CTL 0x1404 -/* Tx control register */ -struct nps_enet_tx_ctl { - union { - /* ct: SW sets to indicate frame ready in Tx buffer for - * transmission. HW resets to when transmission done - * et: Transmit error - * nt: Length in bytes of Tx frame loaded to Tx buffer - */ - struct { - u32 - __reserved_1:16, - ct:1, - et:1, - __reserved_2:3, - nt:11; - }; - - u32 value; - }; -}; - -/* Rx control register */ -struct nps_enet_rx_ctl { - union { - /* cr: HW sets to indicate frame ready in Rx buffer. 
- * SW resets to indicate host read received frame - * and new frames can be written to Rx buffer - * er: Rx error indication - * crc: Rx CRC error indication - * nr: Length in bytes of Rx frame loaded by MAC to Rx buffer - */ - struct { - u32 - __reserved_1:16, - cr:1, - er:1, - crc:1, - __reserved_2:2, - nr:11; - }; - - u32 value; - }; -}; - -/* Interrupt enable for data buffer events register */ -struct nps_enet_buf_int_enable { - union { - /* tx_done: Interrupt generation in the case when new frame - * is ready in Rx buffer - * rx_rdy: Interrupt generation in the case when current frame - * was read from TX buffer - */ - struct { - u32 - __reserved:30, - tx_done:1, - rx_rdy:1; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 0 register */ -struct nps_enet_ge_mac_cfg_0 { - union { - /* tx_pr_len: Transmit preamble length in bytes - * tx_ifg_nib: Tx idle pattern - * nib_mode: Nibble (4-bit) Mode - * rx_pr_check_en: Receive preamble Check Enable - * tx_ifg: Transmit inter-Frame Gap - * rx_ifg: Receive inter-Frame Gap - * tx_fc_retr: Transmit Flow Control Retransmit Mode - * rx_length_check_en: Receive Length Check Enable - * rx_crc_ignore: Results of the CRC check are ignored - * rx_crc_strip: MAC strips the CRC from received frames - * rx_fc_en: Receive Flow Control Enable - * tx_crc_en: Transmit CRC Enabled - * tx_pad_en: Transmit Padding Enable - * tx_cf_en: Transmit Flow Control Enable - * tx_en: Transmit Enable - * rx_en: Receive Enable - */ - struct { - u32 - tx_pr_len:4, - tx_ifg_nib:4, - nib_mode:1, - rx_pr_check_en:1, - tx_ifg:6, - rx_ifg:4, - tx_fc_retr:3, - rx_length_check_en:1, - rx_crc_ignore:1, - rx_crc_strip:1, - rx_fc_en:1, - tx_crc_en:1, - tx_pad_en:1, - tx_fc_en:1, - tx_en:1, - rx_en:1; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 1 register */ -struct nps_enet_ge_mac_cfg_1 { - union { - /* octet_3: MAC address octet 3 - * octet_2: MAC address octet 2 - * octet_1: MAC address octet 1 - * octet_0: MAC address octet 0 - */ - struct { - u32 - octet_3:8, - octet_2:8, - octet_1:8, - octet_0:8; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 2 register */ -struct nps_enet_ge_mac_cfg_2 { - union { - /* transmit_flush_en: MAC flush enable - * stat_en: RMON statistics interface enable - * disc_da: Discard frames with DA different - * from MAC address - * disc_bc: Discard broadcast frames - * disc_mc: Discard multicast frames - * octet_5: MAC address octet 5 - * octet_4: MAC address octet 4 - */ - struct { - u32 - transmit_flush_en:1, - __reserved_1:5, - stat_en:2, - __reserved_2:1, - disc_da:1, - disc_bc:1, - disc_mc:1, - __reserved_3:4, - octet_5:8, - octet_4:8; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 3 register */ -struct nps_enet_ge_mac_cfg_3 { - union { - /* ext_oob_cbfc_sel: Selects one of the 4 profiles for - * extended OOB in-flow-control indication - * max_len: Maximum receive frame length in bytes - * tx_cbfc_en: Enable transmission of class-based - * flow control packets - * rx_ifg_th: Threshold for IFG status reporting via OOB - * cf_timeout: Configurable time to decrement FC counters - * cf_drop: Drop control frames - * redirect_cbfc_sel: Selects one of CBFC redirect profiles - * rx_cbfc_redir_en: Enable Rx class-based flow - * control redirect - * rx_cbfc_en: Enable Rx class-based flow control - * tm_hd_mode: TM header mode - */ - struct { - u32 - ext_oob_cbfc_sel:2, - max_len:14, - tx_cbfc_en:1, - rx_ifg_th:5, - cf_timeout:4, - cf_drop:1, - redirect_cbfc_sel:2, - rx_cbfc_redir_en:1, - rx_cbfc_en:1, - 
tm_hd_mode:1; - }; - - u32 value; - }; -}; - -/* GE MAC, PCS reset control register */ -struct nps_enet_ge_rst { - union { - /* gmac_0: GE MAC reset - * spcs_0: SGMII PCS reset - */ - struct { - u32 - __reserved_1:23, - gmac_0:1, - __reserved_2:7, - spcs_0:1; - }; - - u32 value; - }; -}; - -/* Tx phase sync FIFO control register */ -struct nps_enet_phase_fifo_ctl { - union { - /* init: initialize serdes TX phase sync FIFO pointers - * rst: reset serdes TX phase sync FIFO - */ - struct { - u32 - __reserved:30, - init:1, - rst:1; - }; - - u32 value; - }; -}; +/* Tx control register masks and shifts */ +#define TX_CTL_NT_MASK 0x7FF +#define TX_CTL_NT_SHIFT 0 +#define TX_CTL_ET_MASK 0x4000 +#define TX_CTL_ET_SHIFT 14 +#define TX_CTL_CT_MASK 0x8000 +#define TX_CTL_CT_SHIFT 15 + +/* Rx control register masks and shifts */ +#define RX_CTL_NR_MASK 0x7FF +#define RX_CTL_NR_SHIFT 0 +#define RX_CTL_CRC_MASK 0x2000 +#define RX_CTL_CRC_SHIFT 13 +#define RX_CTL_ER_MASK 0x4000 +#define RX_CTL_ER_SHIFT 14 +#define RX_CTL_CR_MASK 0x8000 +#define RX_CTL_CR_SHIFT 15 + +/* Interrupt enable for data buffer events register masks and shifts */ +#define RX_RDY_MASK 0x1 +#define RX_RDY_SHIFT 0 +#define TX_DONE_MASK 0x2 +#define TX_DONE_SHIFT 1 + +/* Gbps Eth MAC Configuration 0 register masks and shifts */ +#define CFG_0_RX_EN_MASK 0x1 +#define CFG_0_RX_EN_SHIFT 0 +#define CFG_0_TX_EN_MASK 0x2 +#define CFG_0_TX_EN_SHIFT 1 +#define CFG_0_TX_FC_EN_MASK 0x4 +#define CFG_0_TX_FC_EN_SHIFT 2 +#define CFG_0_TX_PAD_EN_MASK 0x8 +#define CFG_0_TX_PAD_EN_SHIFT 3 +#define CFG_0_TX_CRC_EN_MASK 0x10 +#define CFG_0_TX_CRC_EN_SHIFT 4 +#define CFG_0_RX_FC_EN_MASK 0x20 +#define CFG_0_RX_FC_EN_SHIFT 5 +#define CFG_0_RX_CRC_STRIP_MASK 0x40 +#define CFG_0_RX_CRC_STRIP_SHIFT 6 +#define CFG_0_RX_CRC_IGNORE_MASK 0x80 +#define CFG_0_RX_CRC_IGNORE_SHIFT 7 +#define CFG_0_RX_LENGTH_CHECK_EN_MASK 0x100 +#define CFG_0_RX_LENGTH_CHECK_EN_SHIFT 8 +#define CFG_0_TX_FC_RETR_MASK 0xE00 +#define CFG_0_TX_FC_RETR_SHIFT 9 +#define CFG_0_RX_IFG_MASK 0xF000 +#define CFG_0_RX_IFG_SHIFT 12 +#define CFG_0_TX_IFG_MASK 0x3F0000 +#define CFG_0_TX_IFG_SHIFT 16 +#define CFG_0_RX_PR_CHECK_EN_MASK 0x400000 +#define CFG_0_RX_PR_CHECK_EN_SHIFT 22 +#define CFG_0_NIB_MODE_MASK 0x800000 +#define CFG_0_NIB_MODE_SHIFT 23 +#define CFG_0_TX_IFG_NIB_MASK 0xF000000 +#define CFG_0_TX_IFG_NIB_SHIFT 24 +#define CFG_0_TX_PR_LEN_MASK 0xF0000000 +#define CFG_0_TX_PR_LEN_SHIFT 28 + +/* Gbps Eth MAC Configuration 1 register masks and shifts */ +#define CFG_1_OCTET_0_MASK 0x000000FF +#define CFG_1_OCTET_0_SHIFT 0 +#define CFG_1_OCTET_1_MASK 0x0000FF00 +#define CFG_1_OCTET_1_SHIFT 8 +#define CFG_1_OCTET_2_MASK 0x00FF0000 +#define CFG_1_OCTET_2_SHIFT 16 +#define CFG_1_OCTET_3_MASK 0xFF000000 +#define CFG_1_OCTET_3_SHIFT 24 + +/* Gbps Eth MAC Configuration 2 register masks and shifts */ +#define CFG_2_OCTET_4_MASK 0x000000FF +#define CFG_2_OCTET_4_SHIFT 0 +#define CFG_2_OCTET_5_MASK 0x0000FF00 +#define CFG_2_OCTET_5_SHIFT 8 +#define CFG_2_DISK_MC_MASK 0x00100000 +#define CFG_2_DISK_MC_SHIFT 20 +#define CFG_2_DISK_BC_MASK 0x00200000 +#define CFG_2_DISK_BC_SHIFT 21 +#define CFG_2_DISK_DA_MASK 0x00400000 +#define CFG_2_DISK_DA_SHIFT 22 +#define CFG_2_STAT_EN_MASK 0x3000000 +#define CFG_2_STAT_EN_SHIFT 24 +#define CFG_2_TRANSMIT_FLUSH_EN_MASK 0x80000000 +#define CFG_2_TRANSMIT_FLUSH_EN_SHIFT 31 + +/* Gbps Eth MAC Configuration 3 register masks and shifts */ +#define CFG_3_TM_HD_MODE_MASK 0x1 +#define CFG_3_TM_HD_MODE_SHIFT 0 +#define CFG_3_RX_CBFC_EN_MASK 0x2 +#define 
CFG_3_RX_CBFC_EN_SHIFT 1 +#define CFG_3_RX_CBFC_REDIR_EN_MASK 0x4 +#define CFG_3_RX_CBFC_REDIR_EN_SHIFT 2 +#define CFG_3_REDIRECT_CBFC_SEL_MASK 0x18 +#define CFG_3_REDIRECT_CBFC_SEL_SHIFT 3 +#define CFG_3_CF_DROP_MASK 0x20 +#define CFG_3_CF_DROP_SHIFT 5 +#define CFG_3_CF_TIMEOUT_MASK 0x3C0 +#define CFG_3_CF_TIMEOUT_SHIFT 6 +#define CFG_3_RX_IFG_TH_MASK 0x7C00 +#define CFG_3_RX_IFG_TH_SHIFT 10 +#define CFG_3_TX_CBFC_EN_MASK 0x8000 +#define CFG_3_TX_CBFC_EN_SHIFT 15 +#define CFG_3_MAX_LEN_MASK 0x3FFF0000 +#define CFG_3_MAX_LEN_SHIFT 16 +#define CFG_3_EXT_OOB_CBFC_SEL_MASK 0xC0000000 +#define CFG_3_EXT_OOB_CBFC_SEL_SHIFT 30 + +/* GE MAC, PCS reset control register masks and shifts */ +#define RST_SPCS_MASK 0x1 +#define RST_SPCS_SHIFT 0 +#define RST_GMAC_0_MASK 0x100 +#define RST_GMAC_0_SHIFT 8 + +/* Tx phase sync FIFO control register masks and shifts */ +#define PHASE_FIFO_CTL_RST_MASK 0x1 +#define PHASE_FIFO_CTL_RST_SHIFT 0 +#define PHASE_FIFO_CTL_INIT_MASK 0x2 +#define PHASE_FIFO_CTL_INIT_SHIFT 1 /** * struct nps_enet_priv - Storage of ENET's private information. @@ -285,8 +175,8 @@ struct nps_enet_priv { bool tx_packet_sent; struct sk_buff *tx_skb; struct napi_struct napi; - struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2; - struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3; + u32 ge_mac_cfg_2_value; + u32 ge_mac_cfg_3_value; }; /** diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 2106d72c91dc..195122e11f10 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -64,6 +64,7 @@ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_FTRL 0x1b0 /* Frame truncation receive length*/ #define FEC_RACC 0x1c4 /* Receive Accelerator function */ #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ @@ -309,12 +310,6 @@ struct bufdesc_ex { #define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ (((X) == 2) ? \ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) -#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) -#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) #define FEC_DMA_CFG(X) (((X) == 2) ? 
FEC_DMA_CFG_2 : FEC_DMA_CFG_1) @@ -380,6 +375,7 @@ struct bufdesc_ex { #define FEC_ENET_TS_TIMER ((uint)0x00008000) #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ @@ -447,33 +443,35 @@ struct bufdesc_ex { /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +struct bufdesc_prop { + int qid; + /* Address of Rx and Tx buffers */ + struct bufdesc *base; + struct bufdesc *last; + struct bufdesc *cur; + void __iomem *reg_desc_active; + dma_addr_t dma; + unsigned short ring_size; + unsigned char dsize; + unsigned char dsize_log2; +}; + struct fec_enet_priv_tx_q { - int index; + struct bufdesc_prop bd; unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; - dma_addr_t bd_dma; - struct bufdesc *tx_bd_base; - uint tx_ring_size; - unsigned short tx_stop_threshold; unsigned short tx_wake_threshold; - struct bufdesc *cur_tx; struct bufdesc *dirty_tx; char *tso_hdrs; dma_addr_t tso_hdrs_dma; }; struct fec_enet_priv_rx_q { - int index; + struct bufdesc_prop bd; struct sk_buff *rx_skbuff[RX_RING_SIZE]; - - dma_addr_t bd_dma; - struct bufdesc *rx_bd_base; - uint rx_ring_size; - - struct bufdesc *cur_rx; }; /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and @@ -513,8 +511,6 @@ struct fec_enet_private { unsigned long work_ts; unsigned long work_mdio; - unsigned short bufdesc_size; - struct platform_device *pdev; int dev_id; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 41c81f6ec630..37c081583084 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -217,86 +217,38 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ - (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) static int mii_cnt; -static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp + 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } - - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? - ex_base : ex_new_bd); - else - return (new_bd >= (base + ring_size)) ? 
- base : new_bd; -} - -static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp - 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } +static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp >= bd->last) ? bd->base + : (struct bufdesc *)(((unsigned)bdp) + bd->dsize); +} - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd < ex_base) ? - (ex_new_bd + ring_size) : ex_new_bd); - else - return (new_bd < base) ? (new_bd + ring_size) : new_bd; +static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp <= bd->base) ? bd->last + : (struct bufdesc *)(((unsigned)bdp) - bd->dsize); } -static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, - struct fec_enet_private *fep) +static int fec_enet_get_bd_index(struct bufdesc *bdp, + struct bufdesc_prop *bd) { - return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; + return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, - struct fec_enet_priv_tx_q *txq) +static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)txq->dirty_tx - - (const char *)txq->cur_tx) / fep->bufdesc_size - 1; + entries = (((const char *)txq->dirty_tx - + (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; - return entries > 0 ? entries : entries + txq->tx_ring_size; + return entries >= 0 ? entries : entries + txq->bd.ring_size; } static void swap_buffer(void *bufaddr, int len) @@ -329,20 +281,20 @@ static void fec_dump(struct net_device *ndev) pr_info("Nr SC addr len SKB\n"); txq = fep->tx_queue[0]; - bdp = txq->tx_bd_base; + bdp = txq->bd.base; do { pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", index, - bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->bd.cur ? 'S' : ' ', bdp == txq->dirty_tx ? 
'H' : ' ', fec16_to_cpu(bdp->cbd_sc), fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), txq->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); index++; - } while (bdp != txq->tx_bd_base); + } while (bdp != txq->bd.base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -373,10 +325,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = txq->cur_tx; + struct bufdesc *bdp = txq->bd.cur; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -388,7 +339,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); ebdp = (struct bufdesc_ex *)bdp; status = fec16_to_cpu(bdp->cbd_sc); @@ -409,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -418,7 +369,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); @@ -431,7 +382,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, DMA_TO_DEVICE); if (dma_mapping_error(&fep->pdev->dev, addr)) { - dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); goto dma_mapping_error; @@ -439,14 +389,18 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_bufaddr = cpu_to_fec32(addr); bdp->cbd_datlen = cpu_to_fec16(frag_len); + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. 
+ */ + wmb(); bdp->cbd_sc = cpu_to_fec16(status); } return bdp; dma_mapping_error: - bdp = txq->cur_tx; + bdp = txq->bd.cur; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); } @@ -463,12 +417,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, dma_addr_t addr; unsigned short status; unsigned short buflen; - unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -483,7 +436,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } /* Fill in a Tx ring entry */ - bdp = txq->cur_tx; + bdp = txq->bd.cur; last_bdp = bdp; status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; @@ -492,8 +445,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, bufaddr = skb->data; buflen = skb_headlen(skb); - queue = skb_get_queue_mapping(skb); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); @@ -514,8 +466,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, if (nr_frags) { last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (IS_ERR(last_bdp)) + if (IS_ERR(last_bdp)) { + dma_unmap_single(&fep->pdev->dev, addr, + buflen, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; + } } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -525,6 +481,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, estatus |= BD_ENET_TX_TS; } } + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(buflen); if (fep->bufdesc_ex) { @@ -535,7 +493,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -544,12 +502,14 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = cpu_to_fec32(estatus); } - index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); + index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */ txq->tx_skbuff[index] = skb; - bdp->cbd_datlen = cpu_to_fec16(buflen); - bdp->cbd_bufaddr = cpu_to_fec32(addr); + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + wmb(); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. @@ -558,18 +518,18 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_sc = cpu_to_fec16(status); /* If this was the last BD in the ring, start at the beginning again. */ - bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); + bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); skb_tx_timestamp(skb); /* Make sure the update to bdp and tx_skbuff are performed before - * cur_tx. + * txq->bd.cur. 
*/ wmb(); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + writel(0, txq->bd.reg_desc_active); return 0; } @@ -582,7 +542,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; @@ -614,7 +573,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -643,7 +602,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); void *bufaddr; unsigned long dmabuf; unsigned short status; @@ -678,7 +636,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -697,13 +655,12 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = txq->cur_tx; - unsigned short queue = skb_get_queue_mapping(skb); + struct bufdesc *bdp = txq->bd.cur; struct tso_t tso; unsigned int index = 0; int ret; - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -723,7 +680,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; @@ -738,9 +695,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep, queue); - index = fec_enet_get_bd_index(txq->tx_bd_base, - bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + index = fec_enet_get_bd_index(bdp, &txq->bd); ret = fec_enet_txq_put_data_tso(txq, skb, ndev, bdp, index, tso.data, size, @@ -753,22 +709,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Save skb pointer */ txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ if (!(fep->quirks & FEC_QUIRK_ERR007885) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) - writel(0, fep->hwp + 
FEC_X_DES_ACTIVE(queue)); + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; @@ -798,7 +754,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free <= txq->tx_stop_threshold) netif_tx_stop_queue(nq); @@ -819,32 +775,32 @@ static void fec_enet_bd_init(struct net_device *dev) for (q = 0; q < fep->num_rx_queues; q++) { /* Initialize the receive buffer descriptors. */ rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; + bdp = rxq->bd.base; - for (i = 0; i < rxq->rx_ring_size; i++) { + for (i = 0; i < rxq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else bdp->cbd_sc = cpu_to_fec16(0); - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); - rxq->cur_rx = rxq->rx_bd_base; + rxq->bd.cur = rxq->bd.base; } for (q = 0; q < fep->num_tx_queues; q++) { /* ...and the same for transmit */ txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - txq->cur_tx = bdp; + bdp = txq->bd.base; + txq->bd.cur = bdp; - for (i = 0; i < txq->tx_ring_size; i++) { + for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_skbuff[i]) { @@ -852,11 +808,11 @@ static void fec_enet_bd_init(struct net_device *dev) txq->tx_skbuff[i] = NULL; } bdp->cbd_bufaddr = cpu_to_fec32(0); - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); txq->dirty_tx = bdp; } @@ -868,7 +824,7 @@ static void fec_enet_active_rxring(struct net_device *ndev) int i; for (i = 0; i < fep->num_rx_queues; i++) - writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); + writel(0, fep->rx_queue[i]->bd.reg_desc_active); } static void fec_enet_enable_ring(struct net_device *ndev) @@ -880,7 +836,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; - writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ @@ -891,7 +847,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); /* enable DMA1/2 */ if (i) @@ -909,7 +865,7 @@ static void fec_enet_reset_skb(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - for (j = 0; j < txq->tx_ring_size; j++) { + for (j = 0; j < txq->bd.ring_size; j++) { if (txq->tx_skbuff[j]) { dev_kfree_skb_any(txq->tx_skbuff[j]); txq->tx_skbuff[j] = NULL; @@ -988,6 +944,7 @@ fec_restart(struct net_device *ndev) val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); } + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); #endif /* @@ -1221,16 +1178,16 @@ 
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) bdp = txq->dirty_tx; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); - while (bdp != READ_ONCE(txq->cur_tx)) { - /* Order the load of cur_tx and cbd_sc */ + while (bdp != READ_ONCE(txq->bd.cur)) { + /* Order the load of bd.cur and cbd_sc */ rmb(); status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; @@ -1241,7 +1198,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) DMA_TO_DEVICE); bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); continue; } @@ -1290,21 +1247,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); /* Since we have freed up a buffer, the ring is no longer full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold) netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != txq->cur_tx && - readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); + if (bdp != txq->bd.cur && + readl(txq->bd.reg_desc_active) == 0) + writel(0, txq->bd.reg_desc_active); } static void @@ -1366,7 +1323,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, return true; } -/* During a receive, the cur_rx points to the current incoming buffer. +/* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. @@ -1399,7 +1356,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = rxq->cur_rx; + bdp = rxq->bd.cur; while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { @@ -1407,37 +1364,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - /* Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((status & BD_ENET_RX_LAST) == 0) - netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ + status ^= BD_ENET_RX_LAST; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | - BD_ENET_RX_CR | BD_ENET_RX_OV)) { + BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | + BD_ENET_RX_CL)) { ndev->stats.rx_errors++; - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + if (status & BD_ENET_RX_OV) { + /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + goto rx_processing_done; + } + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH + | BD_ENET_RX_LAST)) { /* Frame too long or too short. 
*/ ndev->stats.rx_length_errors++; + if (status & BD_ENET_RX_LAST) + netdev_err(ndev, "rcv is not +last\n"); } - if (status & BD_ENET_RX_NO) /* Frame alignment */ - ndev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ ndev->stats.rx_crc_errors++; - if (status & BD_ENET_RX_OV) /* FIFO overrun */ - ndev->stats.rx_fifo_errors++; - } - - /* Report late collisions as a frame error. - * On this error, the BD is closed, but we don't know what we - * have in the buffer. So, just drop this frame on the floor. - */ - if (status & BD_ENET_RX_CL) { - ndev->stats.rx_errors++; - ndev->stats.rx_frame_errors++; + /* Report late collisions as a frame error. */ + if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + ndev->stats.rx_frame_errors++; goto rx_processing_done; } @@ -1446,7 +1397,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &rxq->bd); skb = rxq->rx_skbuff[index]; /* The packet length includes FCS, but we don't want to @@ -1535,7 +1486,6 @@ rx_processing_done: /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = cpu_to_fec16(status); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -1544,17 +1494,22 @@ rx_processing_done: ebdp->cbd_prot = 0; ebdp->cbd_bdu = 0; } + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); + bdp->cbd_sc = cpu_to_fec16(status); /* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
*/ - writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); + writel(0, rxq->bd.reg_desc_active); } - rxq->cur_rx = bdp; + rxq->bd.cur = bdp; return pkt_received; } @@ -1613,7 +1568,7 @@ fec_enet_interrupt(int irq, void *dev_id) if (napi_schedule_prep(&fep->napi)) { /* Disable the NAPI interrupts */ - writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); __napi_schedule(&fep->napi); } } @@ -2663,8 +2618,8 @@ static void fec_enet_free_buffers(struct net_device *ndev) for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = rxq->rx_skbuff[i]; rxq->rx_skbuff[i] = NULL; if (skb) { @@ -2674,14 +2629,14 @@ static void fec_enet_free_buffers(struct net_device *ndev) DMA_FROM_DEVICE); dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } } for (q = 0; q < fep->num_tx_queues; q++) { txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { kfree(txq->tx_bounce[i]); txq->tx_bounce[i] = NULL; skb = txq->tx_skbuff[i]; @@ -2701,7 +2656,7 @@ static void fec_enet_free_queue(struct net_device *ndev) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; dma_free_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, txq->tso_hdrs, txq->tso_hdrs_dma); } @@ -2727,15 +2682,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev) } fep->tx_queue[i] = txq; - txq->tx_ring_size = TX_RING_SIZE; - fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + txq->bd.ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_wake_threshold = - (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + (txq->bd.ring_size - txq->tx_stop_threshold) / 2; txq->tso_hdrs = dma_alloc_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { @@ -2752,8 +2707,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev) goto alloc_failed; } - fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; - fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; + fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; } return ret; @@ -2772,8 +2727,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_rx_q *rxq; rxq = fep->rx_queue[queue]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; @@ -2791,11 +2746,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; @@ -2813,8 +2768,8 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_tx_q *txq; txq = fep->tx_queue[queue]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); if (!txq->tx_bounce[i]) goto err_alloc; @@ -2827,11 +2782,11 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; @@ -3115,6 +3070,14 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_set_features = fec_set_features, }; +static const unsigned short offset_des_active_rxq[] = { + FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 +}; + +static const unsigned short offset_des_active_txq[] = { + FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 +}; + /* * XXX: We need to clean up on failure exits here. * @@ -3122,13 +3085,15 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_enet_priv_tx_q *txq; - struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; dma_addr_t bd_dma; int bd_size; unsigned int i; + unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : + sizeof(struct bufdesc); + unsigned dsize_log2 = __fls(dsize); + WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) fep->rx_align = 0xf; fep->tx_align = 0xf; @@ -3139,12 +3104,7 @@ static int fec_enet_init(struct net_device *ndev) fec_enet_alloc_queue(ndev); - if (fep->bufdesc_ex) - fep->bufdesc_size = sizeof(struct bufdesc_ex); - else - fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * - fep->bufdesc_size; + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. */ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, @@ -3162,33 +3122,35 @@ static int fec_enet_init(struct net_device *ndev) /* Set receive and transmit descriptor base. 
*/ for (i = 0; i < fep->num_rx_queues; i++) { - rxq = fep->rx_queue[i]; - rxq->index = i; - rxq->rx_bd_base = (struct bufdesc *)cbd_base; - rxq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; - cbd_base += rxq->rx_ring_size; - } + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; + unsigned size = dsize * rxq->bd.ring_size; + + rxq->bd.qid = i; + rxq->bd.base = cbd_base; + rxq->bd.cur = cbd_base; + rxq->bd.dma = bd_dma; + rxq->bd.dsize = dsize; + rxq->bd.dsize_log2 = dsize_log2; + rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); } for (i = 0; i < fep->num_tx_queues; i++) { - txq = fep->tx_queue[i]; - txq->index = i; - txq->tx_bd_base = (struct bufdesc *)cbd_base; - txq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; - cbd_base += txq->tx_ring_size; - } + struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; + unsigned size = dsize * txq->bd.ring_size; + + txq->bd.qid = i; + txq->bd.base = cbd_base; + txq->bd.cur = cbd_base; + txq->bd.dma = bd_dma; + txq->bd.dsize = dsize; + txq->bd.dsize_log2 = dsize_log2; + txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); } @@ -3229,6 +3191,7 @@ static int fec_enet_init(struct net_device *ndev) static void fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; + bool active_high = false; int msec = 1; struct device_node *np = pdev->dev.of_node; @@ -3244,14 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev) if (!gpio_is_valid(phy_reset)) return; + active_high = of_property_read_bool(np, "phy-reset-active-high"); + err = devm_gpio_request_one(&pdev->dev, phy_reset, - GPIOF_OUT_INIT_LOW, "phy-reset"); + active_high ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + "phy-reset"); if (err) { dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } msleep(msec); - gpio_set_value_cansleep(phy_reset, 1); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ static void fec_reset_phy(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 623aa1c8ebc6..79a210aaf0bb 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_free; } + fman->dev = &of_dev->dev; + return fman; fman_node_put: @@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev) dev_set_drvdata(dev, fman); - fman->dev = dev; - dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); return 0; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 7c92eb854925..c88918c4c5f3 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -932,15 +932,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; - /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ - if (dtsec->fm_rev_info.major == 2) - if (pause_time <= 320) { + if (pause_time) { + /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ + if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { pr_warn("pause-time: %d illegal.Should be > 320\n", pause_time); return -EINVAL; } - if (pause_time) { ptv = ioread32be(&regs->ptv); ptv &= PTV_PTE_MASK; ptv |= pause_time & PTV_PT_MASK; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2aa7b401cc3b..d2f917af539f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv) if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) priv->errata |= GFAR_ERRATA_12; + /* P2020/P1010 Rev 1; MPC8548 Rev 2 */ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || - ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ } #endif @@ -2322,6 +2324,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; u32 lstatus; + skb_frag_t *frag; int i, rq = 0; int do_tstamp, do_csum, do_vlan; u32 bufaddr; @@ -2389,52 +2392,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp = txbdp_start = tx_queue->cur_tx; lstatus = be32_to_cpu(txbdp->lstatus); - /* Time stamp insertion requires one additional TxBD */ - if (unlikely(do_tstamp)) - txbdp_tstamp = txbdp = next_txbd(txbdp, base, - tx_queue->tx_ring_size); - - if (nr_frags == 0) { - if (unlikely(do_tstamp)) { - u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); - - lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); - } else { - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - } - } else { - /* Place the fragment addresses and lengths into the TxBDs */ - for (i = 0; i < nr_frags; i++) { -
unsigned int frag_len; - /* Point at the next BD, wrapping as needed */ - txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); - - frag_len = skb_shinfo(skb)->frags[i].size; - - lstatus = be32_to_cpu(txbdp->lstatus) | frag_len | - BD_LFLAG(TXBD_READY); - - /* Handle the last BD specially */ - if (i == nr_frags - 1) - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - - bufaddr = skb_frag_dma_map(priv->dev, - &skb_shinfo(skb)->frags[i], - 0, - frag_len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, bufaddr))) - goto dma_map_err; - - /* set the TxBD length and buffer pointer */ - txbdp->bufPtr = cpu_to_be32(bufaddr); - txbdp->lstatus = cpu_to_be32(lstatus); - } - - lstatus = be32_to_cpu(txbdp_start->lstatus); - } - /* Add TxPAL between FCB and frame if required */ if (unlikely(do_tstamp)) { skb_push(skb, GMAC_TXPAL_LEN); @@ -2469,12 +2426,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (do_vlan) gfar_tx_vlan(skb, fcb); - /* Setup tx hardware time stamping if requested */ - if (unlikely(do_tstamp)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - fcb->ptp = 1; - } - bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(priv->dev, bufaddr))) @@ -2482,6 +2433,46 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp_start->bufPtr = cpu_to_be32(bufaddr); + /* Time stamp insertion requires one additional TxBD */ + if (unlikely(do_tstamp)) + txbdp_tstamp = txbdp = next_txbd(txbdp, base, + tx_queue->tx_ring_size); + + if (likely(!nr_frags)) { + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } else { + u32 lstatus_start = lstatus; + + /* Place the fragment addresses and lengths into the TxBDs */ + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < nr_frags; i++, frag++) { + unsigned int size; + + /* Point at the next BD, wrapping as needed */ + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + size = skb_frag_size(frag); + + lstatus = be32_to_cpu(txbdp->lstatus) | size | + BD_LFLAG(TXBD_READY); + + /* Handle the last BD specially */ + if (i == nr_frags - 1) + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + + bufaddr = skb_frag_dma_map(priv->dev, frag, 0, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + /* set the TxBD length and buffer pointer */ + txbdp->bufPtr = cpu_to_be32(bufaddr); + txbdp->lstatus = cpu_to_be32(lstatus); + } + + lstatus = lstatus_start; + } + /* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of * GMAC_FCB_LEN. 
The second TxBD points to the actual frame data with @@ -2492,12 +2483,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) bufaddr = be32_to_cpu(txbdp_start->bufPtr); bufaddr += fcb_len; + lstatus_ts |= BD_LFLAG(TXBD_READY) | (skb_headlen(skb) - fcb_len); + if (!nr_frags) + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + + /* Setup tx hardware time stamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + fcb->ptp = 1; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); } @@ -2710,7 +2708,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(*ns); + shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); skb_tstamp_tx(skb, &shhwtstamps); gfar_clear_txbd_status(bdp); @@ -2942,7 +2940,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, /* change offset to the other half */ rxb->page_offset ^= GFAR_RXB_TRUESIZE; - atomic_inc(&page->_count); + page_ref_inc(page); return true; } @@ -3039,7 +3037,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) u64 *ns = (u64 *) skb->data; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(*ns); + shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); } if (priv->padding) diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index b40fba929d65..57798814160d 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -422,19 +422,6 @@ static struct ptp_clock_info ptp_gianfar_caps = { .enable = ptp_gianfar_enable, }; -/* OF device tree */ - -static int get_of_u32(struct device_node *node, char *str, u32 *val) -{ - int plen; - const u32 *prop = of_get_property(node, str, &plen); - - if (!prop || plen != sizeof(*prop)) - return -1; - *val = *prop; - return 0; -} - static int gianfar_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; @@ -452,15 +439,21 @@ static int gianfar_ptp_probe(struct platform_device *dev) etsects->caps = ptp_gianfar_caps; - if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) + if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel)) etsects->cksel = DEFAULT_CKSEL; - if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || - get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || - get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) || - get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) || - get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) || - get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) { + if (of_property_read_u32(node, + "fsl,tclk-period", &etsects->tclk_period) || + of_property_read_u32(node, + "fsl,tmr-prsc", &etsects->tmr_prsc) || + of_property_read_u32(node, + "fsl,tmr-add", &etsects->tmr_add) || + of_property_read_u32(node, + "fsl,tmr-fiper1", &etsects->tmr_fiper1) || + of_property_read_u32(node, + "fsl,tmr-fiper2", &etsects->tmr_fiper2) || + of_property_read_u32(node, + "fsl,max-adj", &etsects->caps.max_adj)) { pr_err("device tree node missing required elements\n"); goto no_node; } diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index a7139f588ad2..678f5018d0be 100644 
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link) goto failed; } /* Read MACID from CIS */ - for (i = 5; i < 11; i++) - dev->dev_addr[i] = buf[i]; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = buf[i + 5]; kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 74beb1867230..4ccc032633c4 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -25,6 +25,7 @@ config HIX5HD2_GMAC config HIP04_ETH tristate "HISILICON P04 Ethernet support" + depends on HAS_IOMEM # For MFD_SYSCON select MARVELL_PHY select MFD_SYSCON select HNS_MDIO diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a0070d0e740d..d4f92ed322d6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, { int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); switch (loop) { + case MAC_INTERNALLOOP_PHY: + ret = 0; + break; case MAC_INTERNALLOOP_SERDES: ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); break; @@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, default: ret = -EINVAL; } + + if (!ret) + hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); + return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 9439f04962e1..38fc5be3870c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) } } +static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) +{ + u16 max_q_per_vf, max_vfn; + u32 q_id, q_num_per_port; + u32 mac_id; + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + return; + + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, + HNS_DSAF_COMM_SERVICE_NW_IDX, + &max_vfn, &max_q_per_vf); + q_num_per_port = max_vfn * max_q_per_vf; + + for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { + dsaf_set_dev_field(dsaf_dev, + DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_QID_M, + DSAFV2_SERDES_LBK_QID_S, + q_id); + q_id += q_num_per_port; + } +} + /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id @@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); } +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + return; + + dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_EN_B, !!en); +} + /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id @@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) /* set promisc def queue id */ hns_dsaf_mix_def_qid_cfg(dsaf_dev); + /* set inner loopback queue id */ + hns_dsaf_inner_qid_cfg(dsaf_dev); + /* in non switch mode, set all port to access mode */ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 40205b910f80..5fea226efaf3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index f0c4f9b09d5b..60d695daa471 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -134,6 +134,7 @@ #define DSAF_XGE_INT_STS_0_REG 0x1C0 #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 +#define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -857,6 +858,10 @@ #define PPEV2_CFG_RSS_TBL_4N3_S 24 #define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) +#define DSAFV2_SERDES_LBK_EN_B 8 +#define DSAFV2_SERDES_LBK_QID_S 0 +#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3df22840fcd1..3c4a3bc31a89 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev, switch (loop) { case MAC_INTERNALLOOP_PHY: - if ((phy_dev) && (!phy_dev->is_c45)) + if ((phy_dev) && (!phy_dev->is_c45)) { ret = hns_nic_config_phy_loopback(phy_dev, 0x1); + ret |= h->dev->ops->set_loopback(h, loop, 0x1); + } break; case MAC_INTERNALLOOP_MAC: if ((h->dev->ops->set_loopback) && @@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, struct sk_buff *skb) { struct net_device *ndev; + struct hns_nic_priv *priv; struct hnae_ring *ring; struct netdev_queue *dev_queue; struct sk_buff *new_skb; @@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, char buff[33]; /* 32B data and the last character '\0' */ if (!ring_data) { /* Just for doing create frame*/ + ndev = skb->dev; + priv = netdev_priv(ndev); + frame_size = skb->len; memset(skb->data, 0xFF, frame_size); + if ((!AE_IS_VER1(priv->enet_ver)) && + (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) { + memcpy(skb->data, ndev->dev_addr, 6); + skb->data[5] += 0x1f; + } + frame_size &= ~1ul; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, @@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev, /* place data into test skb */ (void)skb_put(skb, size); + skb->dev = ndev; __lb_other_process(NULL, skb); skb->queue_mapping = NIC_LB_TEST_RING_ID; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 335417b4756b..ebe60719e489 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1166,7 +1166,10 @@ map_failed: if (!firmware_has_feature(FW_FEATURE_CMO)) netdev_err(netdev, "tx: unable to map xmit 
buffer\n"); adapter->tx_map_failed++; - skb_linearize(skb); + if (skb_linearize(skb)) { + netdev->stats.tx_dropped++; + goto out; + } force_bounce = 1; goto retry_bounce; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7d6570843723..6e9e16eee5d0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.cmd = REQUEST_CAPABILITY; crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_tx_entries_per_subcrq); + cpu_to_be64(adapter->req_tx_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); - crq.request_capability.number = cpu_to_be32(adapter->req_mtu); + crq.request_capability.number = cpu_to_be64(adapter->req_mtu); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { if (adapter->promisc_supported) { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(1); + crq.request_capability.number = cpu_to_be64(1); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(0); + crq.request_capability.number = cpu_to_be64(0); ibmvnic_send_crq(adapter, &crq); } @@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, switch (be16_to_cpu(crq->query_capability.capability)) { case MIN_TX_QUEUES: adapter->min_tx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_queues = %lld\n", adapter->min_tx_queues); break; case MIN_RX_QUEUES: adapter->min_rx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_queues = %lld\n", adapter->min_rx_queues); break; case MIN_RX_ADD_QUEUES: adapter->min_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_queues = %lld\n", adapter->min_rx_add_queues); break; case MAX_TX_QUEUES: adapter->max_tx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_queues = %lld\n", adapter->max_tx_queues); break; case MAX_RX_QUEUES: adapter->max_rx_queues = - 
be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_queues = %lld\n", adapter->max_rx_queues); break; case MAX_RX_ADD_QUEUES: adapter->max_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_queues = %lld\n", adapter->max_rx_add_queues); break; case MIN_TX_ENTRIES_PER_SUBCRQ: adapter->min_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", adapter->min_tx_entries_per_subcrq); break; case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->min_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", adapter->min_rx_add_entries_per_subcrq); break; case MAX_TX_ENTRIES_PER_SUBCRQ: adapter->max_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", adapter->max_tx_entries_per_subcrq); break; case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->max_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", adapter->max_rx_add_entries_per_subcrq); break; case TCP_IP_OFFLOAD: adapter->tcp_ip_offload = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "tcp_ip_offload = %lld\n", adapter->tcp_ip_offload); break; case PROMISC_SUPPORTED: adapter->promisc_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "promisc_supported = %lld\n", adapter->promisc_supported); break; case MIN_MTU: - adapter->min_mtu = be32_to_cpu(crq->query_capability.number); + adapter->min_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); break; case MAX_MTU: - adapter->max_mtu = be32_to_cpu(crq->query_capability.number); + adapter->max_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); break; case MAX_MULTICAST_FILTERS: adapter->max_multicast_filters = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_multicast_filters = %lld\n", adapter->max_multicast_filters); break; case VLAN_HEADER_INSERTION: adapter->vlan_header_insertion = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); if (adapter->vlan_header_insertion) netdev->features |= NETIF_F_HW_VLAN_STAG_TX; netdev_dbg(netdev, "vlan_header_insertion = %lld\n", @@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, break; case MAX_TX_SG_ENTRIES: adapter->max_tx_sg_entries = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", adapter->max_tx_sg_entries); break; case RX_SG_SUPPORTED: adapter->rx_sg_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "rx_sg_supported = %lld\n", adapter->rx_sg_supported); break; case OPT_TX_COMP_SUB_QUEUES: adapter->opt_tx_comp_sub_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); 
netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", adapter->opt_tx_comp_sub_queues); break; case OPT_RX_COMP_QUEUES: adapter->opt_rx_comp_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", adapter->opt_rx_comp_queues); break; case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: adapter->opt_rx_bufadd_q_per_rx_comp_q = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", adapter->opt_rx_bufadd_q_per_rx_comp_q); break; case OPT_TX_ENTRIES_PER_SUBCRQ: adapter->opt_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", adapter->opt_tx_entries_per_subcrq); break; case OPT_RXBA_ENTRIES_PER_SUBCRQ: adapter->opt_rxba_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", adapter->opt_rxba_entries_per_subcrq); break; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 1242925ad34c..1a9993cc79b5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -319,10 +319,8 @@ struct ibmvnic_capability { u8 first; u8 cmd; __be16 capability; /* one of ibmvnic_capabilities */ + __be64 number; struct ibmvnic_rc rc; - __be32 number; /*FIX: should be __be64, but I'm getting the least - * significant word first - */ } __packed __aligned(8); struct ibmvnic_login { diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index fa593dd3efe1..3772f3ac956e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -83,6 +83,15 @@ config E1000E To compile this driver as a module, choose M here. The module will be called e1000e. +config E1000E_HWTS + bool "Support HW cross-timestamp on PCH devices" + default y + depends on E1000E && X86 + ---help--- + Say Y to enable hardware supported cross-timestamping on PCH + devices. The cross-timestamp is available through the PTP clock + driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE). 
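The E1000E_HWTS help text above is the only prose description of the new cross-timestamp path, so a brief usage sketch may help; it is not part of the patch. This is a minimal userspace consumer, assuming the e1000e PHC is exposed as /dev/ptp0 (the path and program are illustrative; only PTP_SYS_OFFSET_PRECISE and struct ptp_sys_offset_precise come from the kernel UAPI), with error handling trimmed:

/* Hypothetical consumer of the precise cross-timestamp ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise xts;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed to be the e1000e PHC */

	if (fd < 0)
		return 1;
	memset(&xts, 0, sizeof(xts));
	/* Device time and system (ART-derived) time captured as one pair. */
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xts) == 0)
		printf("device %lld.%09u  system %lld.%09u\n",
		       (long long)xts.device.sec, xts.device.nsec,
		       (long long)xts.sys_realtime.sec, xts.sys_realtime.nsec);
	return 0;
}

When the driver registers no getcrosststamp callback (pre-SPT parts, or a CPU without ART), the ioctl fails and callers fall back to the older sampled PTP_SYS_OFFSET method.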
+ config IGB tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index f7c7804d79e5..0641c0098738 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -528,6 +528,11 @@ #define E1000_RXCW_C 0x20000000 /* Receive config */ #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +/* HH Time Sync */ +#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + #define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index b3949d5bef5c..4e733bf1a38e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -92,6 +92,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7 +#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8 +#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3 +#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index a049e30639a1..c0f4887ea44d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1252,9 +1252,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) ew32(H2ME, mac_reg); } - /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + /* Poll up to 300msec for ME to clear ULP_CFG_DONE. 
*/ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { - if (i++ == 10) { + if (i++ == 30) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1328,6 +1328,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_WOL_HOST | I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_EN_ULP_LANPHYPC | + I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | I218_ULP_CONFIG1_DISABLE_SMB_PERST); e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); @@ -1433,6 +1435,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) emi_addr = I217_RX_CONFIG; ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + if (hw->mac.type == e1000_pch_lpt || + hw->mac.type == e1000_pch_spt) { + u16 phy_reg; + + e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg); + phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; + if (speed == SPEED_100 || speed == SPEED_10) + phy_reg |= 0x3E8; + else + phy_reg |= 0xFA; + e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + } hw->phy.ops.release(hw); if (ret_val) @@ -1467,6 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->phy.ops.release(hw); if (ret_val) return ret_val; + } else { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_wphy_locked(hw, + PHY_REG(776, 20), + 0xC023); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } } } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 34c551e322eb..2311f6003f58 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -188,6 +188,10 @@ #define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ #define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ #define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +/* enable ULP even if when phy powered down via lanphypc */ +#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400 +/* disable clear of sticky ULP on PERST */ +#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800 #define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ /* SMBus Address Phy Register */ @@ -226,6 +230,9 @@ #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 #define HV_PM_CTRL_K1_ENABLE 0x4000 +#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) +#define I217_PLL_CLOCK_GATE_MASK 0x07FF + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ /* Inband Control */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index c71ba1bfc1ec..9b4ec13d9161 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7452,6 +7452,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 25a0ad5102d6..e2ff3ef75d5d 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ 
b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -26,6 +26,12 @@ #include "e1000.h" +#ifdef CONFIG_E1000E_HWTS +#include <linux/clocksource.h> +#include <linux/ktime.h> +#include <asm/tsc.h> +#endif + /** * e1000e_phc_adjfreq - adjust the frequency of the hardware clock * @ptp: ptp clock structure @@ -98,6 +104,78 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } +#ifdef CONFIG_E1000E_HWTS +#define MAX_HW_WAIT_COUNT (3) + +/** + * e1000e_phc_get_syncdevicetime - Callback given to timekeeping code reads system/device registers + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * + * Read device and system (ART) clock simultaneously and return the corrected + * clock values in ns. + **/ +static int e1000e_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)ctx; + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + int i; + u32 tsync_ctrl; + cycle_t dev_cycles; + cycle_t sys_cycles; + + tsync_ctrl = er32(TSYNCTXCTL); + tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC | + E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK; + ew32(TSYNCTXCTL, tsync_ctrl); + for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) { + udelay(1); + tsync_ctrl = er32(TSYNCTXCTL); + if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP) + break; + } + + if (i == MAX_HW_WAIT_COUNT) + return -ETIMEDOUT; + + dev_cycles = er32(SYSSTMPH); + dev_cycles <<= 32; + dev_cycles |= er32(SYSSTMPL); + spin_lock_irqsave(&adapter->systim_lock, flags); + *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles)); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + sys_cycles = er32(PLTSTMPH); + sys_cycles <<= 32; + sys_cycles |= er32(PLTSTMPL); + *system = convert_art_to_tsc(sys_cycles); + + return 0; +} + +/** + * e1000e_phc_getsynctime - Reads the current system/device cross timestamp + * @ptp: ptp clock structure + * @cts: structure containing timestamp + * + * Read device and system (ART) clock simultaneously and return the scaled + * clock values in ns. 
+ **/ +static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + + return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime, + adapter, NULL, xtstamp); +} +#endif/*CONFIG_E1000E_HWTS*/ + /** * e1000e_phc_gettime - Reads the current time from the hardware clock * @ptp: ptp clock structure @@ -236,6 +314,13 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) break; } +#ifdef CONFIG_E1000E_HWTS + /* CPU must have ART and GBe must be from Sunrise Point or greater */ + if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART)) + adapter->ptp_clock_info.getcrosststamp = + e1000e_phc_getcrosststamp; +#endif/*CONFIG_E1000E_HWTS*/ + INIT_DELAYED_WORK(&adapter->systim_overflow_work, e1000e_systim_overflow_work); diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 1d5e0b77062a..0cb4d365e5ad 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -245,6 +245,10 @@ #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ +#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ +#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ +#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b243c3cbe68f..4de17db3808c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -243,7 +243,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users. */ - atomic_inc(&page->_count); + page_ref_inc(page); return true; } @@ -1937,8 +1937,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; u32 reta, base; - /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { + /* If the Rx flow indirection table has been configured manually, we + * need to maintain it when possible. + */ + if (netif_is_rxfh_configured(interface->netdev)) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && @@ -1946,6 +1948,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) (((reta << 8) >> 24) < rss_i) && (((reta) >> 24) < rss_i)) continue; + + /* this should never happen */ + dev_err(&interface->pdev->dev, + "RSS indirection table assigned flows out of queue bounds. 
Reconfiguring.\n"); goto repopulate_reta; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 662569d5b7c0..d09a8dd71fc2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1204,6 +1204,15 @@ err_queueing_scheme: return err; } +static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return fm10k_setup_tc(dev, tc->tc); +} + static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -1386,7 +1395,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, .ndo_set_rx_mode = fm10k_set_rx_mode, .ndo_get_stats64 = fm10k_get_stats64, - .ndo_setup_tc = fm10k_setup_tc, + .ndo_setup_tc = __fm10k_setup_tc, .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index b4729ba57c9c..3b3c63e54ed6 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \ i40e_diag.o \ i40e_txrx.o \ i40e_ptp.o \ + i40e_client.o \ i40e_virtchnl_pf.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 68f2204ec6f3..1ce6e9c0427d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -58,15 +58,13 @@ #ifdef I40E_FCOE #include "i40e_fcoe.h" #endif +#include "i40e_client.h" #include "i40e_virtchnl.h" #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" #include "i40e_dcb.h" /* Useful i40e defaults */ -#define I40E_BASE_PF_SEID 16 -#define I40E_BASE_VSI_SEID 512 -#define I40E_BASE_VEB_SEID 288 #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 @@ -104,6 +102,7 @@ #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_PS BIT(4) +#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -113,6 +112,7 @@ #define I40E_OEM_VER_PATCH_MASK 0xff #define I40E_OEM_VER_BUILD_SHIFT 8 #define I40E_OEM_VER_SHIFT 24 +#define I40E_PHY_DEBUG_PORT BIT(4) /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -137,6 +137,19 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) +/** + * i40e_is_mac_710 - Return true if MAC is X710/XL710 + * @hw: ptr to the hardware info + **/ +static inline bool i40e_is_mac_710(struct i40e_hw *hw) +{ + if ((hw->mac.type == I40E_MAC_X710) || + (hw->mac.type == I40E_MAC_XL710)) + return true; + + return false; +} + /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -178,6 +191,7 @@ struct i40e_lump_tracking { u16 search_hint; u16 list[0]; #define I40E_PILE_VALID_BIT 0x8000 +#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2) }; #define I40E_DEFAULT_ATR_SAMPLE_RATE 20 @@ -270,6 +284,8 @@ struct i40e_pf { #endif /* I40E_FCOE */ u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ + u16 num_iwarp_msix; /* num of iwarp vectors for this PF */ + int iwarp_base_vector; int queues_left; /* queues left unclaimed */ u16 alloc_rss_size; /* allocated RSS queues */ u16 rss_size_max; /* HW defined max RSS queues */ @@ -317,6 +333,7 @@ struct i40e_pf { #define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) +#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) #define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) #define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) #define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) @@ -339,6 +356,12 @@ struct i40e_pf { #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) +#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) +#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) +#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) +#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) +#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48) #define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ @@ -391,6 +414,7 @@ struct i40e_pf { struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; + u32 arq_overflows; /* Not fatal, possibly indicative of problems */ /* DCBx/DCBNL capability for PF that indicates * whether DCBx is managed by firmware or host @@ -423,6 +447,7 @@ struct i40e_pf { u32 ioremap_len; u32 fd_inv; + u16 phy_led_val; }; struct i40e_mac_filter { @@ -492,6 +517,7 @@ struct i40e_vsi { u32 tx_busy; u64 tx_linearize; u64 
tx_force_wb; + u64 tx_lost_interrupt; u32 rx_buf_failed; u32 rx_page_failed; @@ -500,13 +526,6 @@ struct i40e_vsi { struct i40e_ring **tx_rings; u16 work_limit; - /* high bit set means dynamic, use accessor routines to read/write. - * hardware only supports 2us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. - */ - u16 rx_itr_setting; - u16 tx_itr_setting; u16 int_rate_limit; /* value in usecs */ u16 rss_table_size; /* HW RSS table size */ @@ -557,6 +576,8 @@ struct i40e_vsi { struct kobject *kobj; /* sysfs object */ bool current_isup; /* Sync 'link up' logging */ + void *priv; /* client driver data reference. */ + /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -714,6 +735,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add); #endif +void i40e_service_event_schedule(struct i40e_pf *pf); +void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, + u8 *msg, u16 len); + int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, @@ -736,6 +761,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} static inline void i40e_dbg_init(void) {} static inline void i40e_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS*/ +/* needed by client drivers */ +int i40e_lan_add_device(struct i40e_pf *pf); +int i40e_lan_del_device(struct i40e_pf *pf); +void i40e_client_subtask(struct i40e_pf *pf); +void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); +void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi); +void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); +void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); +void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); +int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, + enum i40e_client_type type); /** * i40e_irq_dynamic_enable - Enable default interrupt generation settings * @vsi: pointer to a vsi @@ -747,6 +783,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) struct i40e_hw *hw = &pf->hw; u32 val; + /* definitely clear the PBA here, as this function is meant to + * clean out all previous interrupts AND enable the interrupt + */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); @@ -754,9 +793,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) /* skip the flush */ } -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, @@ -786,7 +824,8 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE int i40e_close(struct net_device *netdev); -int i40e_setup_tc(struct net_device *netdev, u8 tc); +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); void i40e_netpoll(struct net_device *netdev); int i40e_fcoe_enable(struct net_device *netdev); int i40e_fcoe_disable(struct net_device *netdev); diff 
--git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 1fd5ea82a9bc..df8e2fd6a649 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -953,6 +953,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); @@ -1020,14 +1023,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; -clean_arq_element_out: - /* Set pending if needed, unlock and return */ - if (pending != NULL) - *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); - -clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); - if (i40e_is_nvm_update_op(&e->desc)) { if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); @@ -1048,6 +1043,13 @@ clean_arq_element_err: } } +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); +clean_arq_element_err: + mutex_unlock(&hw->aq.arq_mutex); + return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b22012a446a6..8d5c65ab6267 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; @@ -145,6 +145,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -220,6 +223,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -228,6 +232,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -402,6 +407,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -422,6 +428,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -680,6 +687,31 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -906,7 +938,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; @@ -973,6 +1006,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define 
I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1069,6 +1103,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; @@ -1257,10 +1292,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; @@ -1755,7 +1796,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); @@ -1823,6 +1869,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1912,6 +1970,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2191,6 +2265,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c new file mode 100644 index 000000000000..0e6ac841321c --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -0,0 +1,1012 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include <linux/list.h> +#include <linux/errno.h> + +#include "i40e.h" +#include "i40e_prototype.h" +#include "i40e_client.h" + +static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; + +static LIST_HEAD(i40e_devices); +static DEFINE_MUTEX(i40e_device_mutex); + +static LIST_HEAD(i40e_clients); +static DEFINE_MUTEX(i40e_client_mutex); + +static LIST_HEAD(i40e_client_instances); +static DEFINE_MUTEX(i40e_client_instance_mutex); + +static int i40e_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len); + +static int i40e_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info); + +static void i40e_client_request_reset(struct i40e_info *ldev, + struct i40e_client *client, + u32 reset_level); + +static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag); + +static struct i40e_ops i40e_lan_ops = { + .virtchnl_send = i40e_client_virtchnl_send, + .setup_qvlist = i40e_client_setup_qvlist, + .request_reset = i40e_client_request_reset, + .update_vsi_ctxt = i40e_client_update_vsi_ctxt, +}; + +/** + * i40e_client_type_to_vsi_type - convert client type to vsi type + * @client_type: the i40e_client type + * + * returns the related vsi type value + **/ +static +enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type) +{ + switch (type) { + case I40E_CLIENT_IWARP: + return I40E_VSI_IWARP; + + case I40E_CLIENT_VMDQ2: + return I40E_VSI_VMDQ2; + + default: + pr_err("i40e: Client type unknown\n"); + return I40E_VSI_TYPE_UNKNOWN; + } +} + +/** + * i40e_client_get_params - Get the params that can change at runtime + * @vsi: the VSI with the message + * @param: clinet param struct + * + **/ +static +int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) +{ + struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config; + int i = 0; + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + u8 tc = dcb_cfg->etscfg.prioritytable[i]; + u16 qs_handle; + + /* If TC is not enabled for VSI use TC0 for UP */ + if (!(vsi->tc_config.enabled_tc & BIT(tc))) + tc = 0; + + qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]); + params->qos.prio_qos[i].tc = tc; + params->qos.prio_qos[i].qs_handle = qs_handle; + if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) { + dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n", + tc, vsi->id); + return -EINVAL; + } + } + + params->mtu = 
vsi->netdev->mtu; + return 0; +} + +/** + * i40e_notify_client_of_vf_msg - call the client vf message callback + * @vsi: the VSI with the message + * @vf_id: the absolute VF id that sent the message + * @msg: message buffer + * @len: length of the message + * + * If there is a client to this VSI, call the client + **/ +void +i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) +{ + struct i40e_client_instance *cdev; + + if (!vsi) + return; + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.pf == vsi->back) { + if (!cdev->client || + !cdev->client->ops || + !cdev->client->ops->virtchnl_receive) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance virtual channel receive routine\n"); + continue; + } + cdev->client->ops->virtchnl_receive(&cdev->lan_info, + cdev->client, + vf_id, msg, len); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_notify_client_of_l2_param_changes - call the client notify callback + * @vsi: the VSI with l2 param changes + * + * If there is a client to this VSI, call the client + **/ +void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) +{ + struct i40e_client_instance *cdev; + struct i40e_params params; + + if (!vsi) + return; + memset(&params, 0, sizeof(params)); + i40e_client_get_params(vsi, &params); + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.pf == vsi->back) { + if (!cdev->client || + !cdev->client->ops || + !cdev->client->ops->l2_param_change) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance l2_param_change routine\n"); + continue; + } + cdev->lan_info.params = params; + cdev->client->ops->l2_param_change(&cdev->lan_info, + cdev->client, + &params); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_notify_client_of_netdev_open - call the client open callback + * @vsi: the VSI with netdev opened + * + * If there is a client to this netdev, call the client with open + **/ +void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) +{ + struct i40e_client_instance *cdev; + + if (!vsi) + return; + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.netdev == vsi->netdev) { + if (!cdev->client || + !cdev->client->ops || !cdev->client->ops->open) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance open routine\n"); + continue; + } + cdev->client->ops->open(&cdev->lan_info, cdev->client); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_client_release_qvlist + * @ldev: pointer to L2 context. 
+ * + **/ +static void i40e_client_release_qvlist(struct i40e_info *ldev) +{ + struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info; + u32 i; + + if (!ldev->qvlist_info) + return; + + for (i = 0; i < qvlist_info->num_vectors; i++) { + struct i40e_pf *pf = ldev->pf; + struct i40e_qv_info *qv_info; + u32 reg_idx; + + qv_info = &qvlist_info->qv_info[i]; + if (!qv_info) + continue; + reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1); + wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); + } + kfree(ldev->qvlist_info); + ldev->qvlist_info = NULL; +} + +/** + * i40e_notify_client_of_netdev_close - call the client close callback + * @vsi: the VSI with netdev closed + * @reset: true when close called due to a reset pending + * + * If there is a client to this netdev, call the client with close + **/ +void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) +{ + struct i40e_client_instance *cdev; + + if (!vsi) + return; + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.netdev == vsi->netdev) { + if (!cdev->client || + !cdev->client->ops || !cdev->client->ops->close) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance close routine\n"); + continue; + } + cdev->client->ops->close(&cdev->lan_info, cdev->client, + reset); + i40e_client_release_qvlist(&cdev->lan_info); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_notify_client_of_vf_reset - call the client vf reset callback + * @pf: PF device pointer + * @vf_id: asolute id of VF being reset + * + * If there is a client attached to this PF, notify when a VF is reset + **/ +void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) +{ + struct i40e_client_instance *cdev; + + if (!pf) + return; + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.pf == pf) { + if (!cdev->client || + !cdev->client->ops || + !cdev->client->ops->vf_reset) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance VF reset routine\n"); + continue; + } + cdev->client->ops->vf_reset(&cdev->lan_info, + cdev->client, vf_id); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_notify_client_of_vf_enable - call the client vf notification callback + * @pf: PF device pointer + * @num_vfs: the number of VFs currently enabled, 0 for disable + * + * If there is a client attached to this PF, call its VF notification routine + **/ +void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) +{ + struct i40e_client_instance *cdev; + + if (!pf) + return; + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.pf == pf) { + if (!cdev->client || + !cdev->client->ops || + !cdev->client->ops->vf_enable) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance VF enable routine\n"); + continue; + } + cdev->client->ops->vf_enable(&cdev->lan_info, + cdev->client, num_vfs); + } + } + mutex_unlock(&i40e_client_instance_mutex); +} + +/** + * i40e_vf_client_capable - ask the client if it likes the specified VF + * @pf: PF device pointer + * @vf_id: the VF in question + * + * If there is a client of the specified type attached to this PF, call + * its vf_capable routine + **/ +int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, + enum i40e_client_type type) +{ + struct i40e_client_instance *cdev; + int capable = false; + + if (!pf) + return false; + 
mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if (cdev->lan_info.pf == pf) { + if (!cdev->client || + !cdev->client->ops || + !cdev->client->ops->vf_capable || + !(cdev->client->type == type)) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance VF capability routine\n"); + continue; + } + capable = cdev->client->ops->vf_capable(&cdev->lan_info, + cdev->client, + vf_id); + break; + } + } + mutex_unlock(&i40e_client_instance_mutex); + return capable; +} + +/** + * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi + * @pf: board private structure + * @type: vsi type + * @start_vsi: a VSI pointer from where to start the search + * + * Returns non NULL on success or NULL for failure + **/ +struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, + enum i40e_vsi_type type, + struct i40e_vsi *start_vsi) +{ + struct i40e_vsi *vsi; + int i = 0; + + if (start_vsi) { + for (i = 0; i < pf->num_alloc_vsi; i++) { + vsi = pf->vsi[i]; + if (vsi == start_vsi) + break; + } + } + for (; i < pf->num_alloc_vsi; i++) { + vsi = pf->vsi[i]; + if (vsi && vsi->type == type) + return vsi; + } + + return NULL; +} + +/** + * i40e_client_add_instance - add a client instance struct to the instance list + * @pf: pointer to the board struct + * @client: pointer to a client struct in the client list. + * + * Returns cdev ptr on success, NULL on failure + **/ +static +struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, + struct i40e_client *client) +{ + struct i40e_client_instance *cdev; + struct netdev_hw_addr *mac = NULL; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry(cdev, &i40e_client_instances, list) { + if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { + cdev = NULL; + goto out; + } + } + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + goto out; + + cdev->lan_info.pf = (void *)pf; + cdev->lan_info.netdev = vsi->netdev; + cdev->lan_info.pcidev = pf->pdev; + cdev->lan_info.fid = pf->hw.pf_id; + cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF; + cdev->lan_info.hw_addr = pf->hw.hw_addr; + cdev->lan_info.ops = &i40e_lan_ops; + cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR; + cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR; + cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD; + cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver; + cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver; + cdev->lan_info.fw_build = pf->hw.aq.fw_build; + set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state); + + if (i40e_client_get_params(vsi, &cdev->lan_info.params)) { + kfree(cdev); + cdev = NULL; + goto out; + } + + cdev->lan_info.msix_count = pf->num_iwarp_msix; + cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; + + mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list, + struct netdev_hw_addr, list); + if (mac) + ether_addr_copy(cdev->lan_info.lanmac, mac->addr); + else + dev_err(&pf->pdev->dev, "MAC address list is empty!\n"); + + cdev->client = client; + INIT_LIST_HEAD(&cdev->list); + list_add(&cdev->list, &i40e_client_instances); +out: + mutex_unlock(&i40e_client_instance_mutex); + return cdev; +} + +/** + * i40e_client_del_instance - removes a client instance from the list + * @pf: pointer to the board struct + * + * Returns 0 on success or non-0 on error + **/ +static +int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client) +{ + struct i40e_client_instance *cdev, *tmp; + int 
ret = -ENODEV; + + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) { + if ((cdev->lan_info.pf != pf) || (cdev->client != client)) + continue; + + dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n", + client->name, pf->hw.pf_id, + pf->hw.bus.device, pf->hw.bus.func); + list_del(&cdev->list); + kfree(cdev); + ret = 0; + break; + } + mutex_unlock(&i40e_client_instance_mutex); + return ret; +} + +/** + * i40e_client_subtask - client maintenance work + * @pf: board private structure + **/ +void i40e_client_subtask(struct i40e_pf *pf) +{ + struct i40e_client_instance *cdev; + struct i40e_client *client; + int ret = 0; + + if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) + return; + pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED; + + /* If we're down or resetting, just bail */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + /* Check client state and instantiate client if client registered */ + mutex_lock(&i40e_client_mutex); + list_for_each_entry(client, &i40e_clients, list) { + /* first check client is registered */ + if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state)) + continue; + + /* Do we also need the LAN VSI to be up, to create instance */ + if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) { + /* check if L2 VSI is up, if not we are not ready */ + if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) + continue; + } + + /* Add the client instance to the instance list */ + cdev = i40e_client_add_instance(pf, client); + if (!cdev) + continue; + + /* Also up the ref_cnt of no. of instances of this client */ + atomic_inc(&client->ref_cnt); + dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", + client->name, pf->hw.pf_id, + pf->hw.bus.device, pf->hw.bus.func); + + /* Send an Open request to the client */ + atomic_inc(&cdev->ref_cnt); + if (client->ops && client->ops->open) + ret = client->ops->open(&cdev->lan_info, client); + atomic_dec(&cdev->ref_cnt); + if (!ret) { + set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + } else { + /* remove client instance */ + i40e_client_del_instance(pf, client); + atomic_dec(&client->ref_cnt); + continue; + } + } + mutex_unlock(&i40e_client_mutex); +} + +/** + * i40e_lan_add_device - add a lan device struct to the list of lan devices + * @pf: pointer to the board struct + * + * Returns 0 on success or none 0 on error + **/ +int i40e_lan_add_device(struct i40e_pf *pf) +{ + struct i40e_device *ldev; + int ret = 0; + + mutex_lock(&i40e_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { + if (ldev->pf == pf) { + ret = -EEXIST; + goto out; + } + } + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) { + ret = -ENOMEM; + goto out; + } + ldev->pf = pf; + INIT_LIST_HEAD(&ldev->list); + list_add(&ldev->list, &i40e_devices); + dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); + + /* Since in some cases register may have happened before a device gets + * added, we can schedule a subtask to go initiate the clients. 
+ */ + pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + i40e_service_event_schedule(pf); + +out: + mutex_unlock(&i40e_device_mutex); + return ret; +} + +/** + * i40e_lan_del_device - removes a lan device from the device list + * @pf: pointer to the board struct + * + * Returns 0 on success or non-0 on error + **/ +int i40e_lan_del_device(struct i40e_pf *pf) +{ + struct i40e_device *ldev, *tmp; + int ret = -ENODEV; + + mutex_lock(&i40e_device_mutex); + list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { + if (ldev->pf == pf) { + dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n", + pf->hw.pf_id, pf->hw.bus.device, + pf->hw.bus.func); + list_del(&ldev->list); + kfree(ldev); + ret = 0; + break; + } + } + + mutex_unlock(&i40e_device_mutex); + return ret; +} + +/** + * i40e_client_release - release client specific resources + * @client: pointer to the registered client + * + * Return 0 on success or < 0 on error + **/ +static int i40e_client_release(struct i40e_client *client) +{ + struct i40e_client_instance *cdev, *tmp; + struct i40e_pf *pf = NULL; + int ret = 0; + + LIST_HEAD(cdevs_tmp); + + mutex_lock(&i40e_client_instance_mutex); + list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) { + if (strncmp(cdev->client->name, client->name, + I40E_CLIENT_STR_LENGTH)) + continue; + if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + if (atomic_read(&cdev->ref_cnt) > 0) { + ret = I40E_ERR_NOT_READY; + goto out; + } + pf = (struct i40e_pf *)cdev->lan_info.pf; + if (client->ops && client->ops->close) + client->ops->close(&cdev->lan_info, client, + false); + i40e_client_release_qvlist(&cdev->lan_info); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + + dev_warn(&pf->pdev->dev, + "Client %s instance for PF id %d closed\n", + client->name, pf->hw.pf_id); + } + /* delete the client instance from the list */ + list_del(&cdev->list); + list_add(&cdev->list, &cdevs_tmp); + atomic_dec(&client->ref_cnt); + dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", + client->name); + } +out: + mutex_unlock(&i40e_client_instance_mutex); + + /* free the client device and release its vsi */ + list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) { + kfree(cdev); + } + return ret; +} + +/** + * i40e_client_prepare - prepare client specific resources + * @client: pointer to the registered client + * + * Return 0 on success or < 0 on error + **/ +static int i40e_client_prepare(struct i40e_client *client) +{ + struct i40e_device *ldev; + struct i40e_pf *pf; + int ret = 0; + + mutex_lock(&i40e_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { + pf = ldev->pf; + /* Start the client subtask */ + pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + i40e_service_event_schedule(pf); + } + mutex_unlock(&i40e_device_mutex); + return ret; +} + +/** + * i40e_client_virtchnl_send - TBD + * @ldev: pointer to L2 context + * @client: Client pointer + * @vf_id: absolute VF identifier + * @msg: message buffer + * @len: length of message buffer + * + * Return 0 on success or < 0 on error + **/ +static int i40e_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len) +{ + struct i40e_pf *pf = ldev->pf; + struct i40e_hw *hw = &pf->hw; + i40e_status err; + + err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP, + 0, msg, len, NULL); + if (err) + dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n", + err, hw->aq.asq_last_status); + + return err; +} + +/** + 
* i40e_client_setup_qvlist + * @ldev: pointer to L2 context. + * @client: Client pointer. + * @qv_info: queue and vector list + * + * Return 0 on success or < 0 on error + **/ +static int i40e_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info) +{ + struct i40e_pf *pf = ldev->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_qv_info *qv_info; + u32 v_idx, i, reg_idx, reg; + u32 size; + + size = sizeof(struct i40e_qvlist_info) + + (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1)); + ldev->qvlist_info = kzalloc(size, GFP_KERNEL); + ldev->qvlist_info->num_vectors = qvlist_info->num_vectors; + + for (i = 0; i < qvlist_info->num_vectors; i++) { + qv_info = &qvlist_info->qv_info[i]; + if (!qv_info) + continue; + v_idx = qv_info->v_idx; + + /* Validate vector id belongs to this client */ + if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) || + (v_idx < pf->iwarp_base_vector)) + goto err; + + ldev->qvlist_info->qv_info[i] = *qv_info; + reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1); + + if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) { + /* Special case - No CEQ mapped on this vector */ + wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); + } else { + reg = (qv_info->ceq_idx & + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | + (I40E_QUEUE_TYPE_PE_CEQ << + I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); + wr32(hw, reg_idx, reg); + + reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK | + (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) | + (qv_info->itr_idx << + I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) | + (I40E_QUEUE_END_OF_LIST << + I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)); + wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg); + } + if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { + reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK | + (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) | + (qv_info->itr_idx << + I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)); + + wr32(hw, I40E_PFINT_AEQCTL, reg); + } + } + + return 0; +err: + kfree(ldev->qvlist_info); + ldev->qvlist_info = NULL; + return -EINVAL; +} + +/** + * i40e_client_request_reset + * @ldev: pointer to L2 context. + * @client: Client pointer. + * @level: reset level + **/ +static void i40e_client_request_reset(struct i40e_info *ldev, + struct i40e_client *client, + u32 reset_level) +{ + struct i40e_pf *pf = ldev->pf; + + switch (reset_level) { + case I40E_CLIENT_RESET_LEVEL_PF: + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + break; + case I40E_CLIENT_RESET_LEVEL_CORE: + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + break; + default: + dev_warn(&pf->pdev->dev, + "Client %s instance for PF id %d request an unsupported reset: %d.\n", + client->name, pf->hw.pf_id, reset_level); + break; + } + + i40e_service_event_schedule(pf); +} + +/** + * i40e_client_update_vsi_ctxt + * @ldev: pointer to L2 context. + * @client: Client pointer. 
+ * @is_vf: if this for the VF + * @vf_id: if is_vf true this carries the vf_id + * @flag: Any device level setting that needs to be done for PE + * @valid_flag: Bits in this match up and enable changing of flag bits + * + * Return 0 on success or < 0 on error + **/ +static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag) +{ + struct i40e_pf *pf = ldev->pf; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + bool update = true; + i40e_status err; + + /* TODO: for now do not allow setting VF's VSI setting */ + if (is_vf) + return -EINVAL; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + if (err) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return -ENOENT; + } + + if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && + (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { + ctxt.info.valid_sections = + cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; + } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && + !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { + ctxt.info.valid_sections = + cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA; + } else { + update = false; + dev_warn(&pf->pdev->dev, + "Client %s instance for PF id %d request an unsupported Config: %x.\n", + client->name, pf->hw.pf_id, flag); + } + + if (update) { + err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (err) { + dev_info(&pf->pdev->dev, + "update VSI ctxt for PE failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + } + } + return err; +} + +/** + * i40e_register_client - Register a i40e client driver with the L2 driver + * @client: pointer to the i40e_client struct + * + * Returns 0 on success or non-0 on error + **/ +int i40e_register_client(struct i40e_client *client) +{ + int ret = 0; + enum i40e_vsi_type vsi_type; + + if (!client) { + ret = -EIO; + goto out; + } + + if (strlen(client->name) == 0) { + pr_info("i40e: Failed to register client with no name\n"); + ret = -EIO; + goto out; + } + + mutex_lock(&i40e_client_mutex); + if (i40e_client_is_registered(client)) { + pr_info("i40e: Client %s has already been registered!\n", + client->name); + mutex_unlock(&i40e_client_mutex); + ret = -EEXIST; + goto out; + } + + if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) || + (client->version.minor != I40E_CLIENT_VERSION_MINOR)) { + pr_info("i40e: Failed to register client %s due to mismatched client interface version\n", + client->name); + pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", + client->version.major, client->version.minor, + client->version.build, + i40e_client_interface_version_str); + mutex_unlock(&i40e_client_mutex); + ret = -EIO; + goto out; + } + + vsi_type = i40e_client_type_to_vsi_type(client->type); + if (vsi_type == I40E_VSI_TYPE_UNKNOWN) { + pr_info("i40e: Failed to register client %s due to unknown client type %d\n", + client->name, client->type); + mutex_unlock(&i40e_client_mutex); + ret = -EIO; + goto out; + } + list_add(&client->list, &i40e_clients); + set_bit(__I40E_CLIENT_REGISTERED, &client->state); 
+ mutex_unlock(&i40e_client_mutex); + + if (i40e_client_prepare(client)) { + ret = -EIO; + goto out; + } + + pr_info("i40e: Registered client %s with return code %d\n", + client->name, ret); +out: + return ret; +} +EXPORT_SYMBOL(i40e_register_client); + +/** + * i40e_unregister_client - Unregister a i40e client driver with the L2 driver + * @client: pointer to the i40e_client struct + * + * Returns 0 on success or non-0 on error + **/ +int i40e_unregister_client(struct i40e_client *client) +{ + int ret = 0; + + /* When a unregister request comes through we would have to send + * a close for each of the client instances that were opened. + * client_release function is called to handle this. + */ + if (!client || i40e_client_release(client)) { + ret = -EIO; + goto out; + } + + /* TODO: check if device is in reset, or if that matters? */ + mutex_lock(&i40e_client_mutex); + if (!i40e_client_is_registered(client)) { + pr_info("i40e: Client %s has not been registered\n", + client->name); + mutex_unlock(&i40e_client_mutex); + ret = -ENODEV; + goto out; + } + if (atomic_read(&client->ref_cnt) == 0) { + clear_bit(__I40E_CLIENT_REGISTERED, &client->state); + list_del(&client->list); + pr_info("i40e: Unregistered client %s with return code %d\n", + client->name, ret); + } else { + ret = I40E_ERR_NOT_READY; + pr_err("i40e: Client %s failed unregister - client has open instances\n", + client->name); + } + + mutex_unlock(&i40e_client_mutex); +out: + return ret; +} +EXPORT_SYMBOL(i40e_unregister_client); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h new file mode 100644 index 000000000000..bf6b453d93a1 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -0,0 +1,232 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_CLIENT_H_ +#define _I40E_CLIENT_H_ + +#define I40E_CLIENT_STR_LENGTH 10 + +/* Client interface version should be updated anytime there is a change in the + * existing APIs or data structures. + */ +#define I40E_CLIENT_VERSION_MAJOR 0 +#define I40E_CLIENT_VERSION_MINOR 01 +#define I40E_CLIENT_VERSION_BUILD 00 +#define I40E_CLIENT_VERSION_STR \ + XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \ + XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." 
\ + XSTRINGIFY(I40E_CLIENT_VERSION_BUILD) + +struct i40e_client_version { + u8 major; + u8 minor; + u8 build; + u8 rsvd; +}; + +enum i40e_client_state { + __I40E_CLIENT_NULL, + __I40E_CLIENT_REGISTERED +}; + +enum i40e_client_instance_state { + __I40E_CLIENT_INSTANCE_NONE, + __I40E_CLIENT_INSTANCE_OPENED, +}; + +enum i40e_client_type { + I40E_CLIENT_IWARP, + I40E_CLIENT_VMDQ2 +}; + +struct i40e_ops; +struct i40e_client; + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_qvlist_info { + u32 num_vectors; + struct i40e_qv_info qv_info[1]; +}; + +#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF + +/* set of LAN parameters useful for clients managed by LAN */ + +/* Struct to hold per priority info */ +struct i40e_prio_qos_params { + u16 qs_handle; /* qs handle for prio */ + u8 tc; /* TC mapped to prio */ + u8 reserved; +}; + +#define I40E_CLIENT_MAX_USER_PRIORITY 8 +/* Struct to hold Client QoS */ +struct i40e_qos_params { + struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +}; + +struct i40e_params { + struct i40e_qos_params qos; + u16 mtu; +}; + +/* Structure to hold Lan device info for a client device */ +struct i40e_info { + struct i40e_client_version version; + u8 lanmac[6]; + struct net_device *netdev; + struct pci_dev *pcidev; + u8 __iomem *hw_addr; + u8 fid; /* function id, PF id or VF id */ +#define I40E_CLIENT_FTYPE_PF 0 +#define I40E_CLIENT_FTYPE_VF 1 + u8 ftype; /* function type, PF or VF */ + void *pf; + + /* All L2 params that could change during the life span of the PF + * and needs to be communicated to the client when they change + */ + struct i40e_qvlist_info *qvlist_info; + struct i40e_params params; + struct i40e_ops *ops; + + u16 msix_count; /* number of msix vectors*/ + /* Array down below will be dynamically allocated based on msix_count */ + struct msix_entry *msix_entries; + u16 itr_index; /* Which ITR index the PE driver is suppose to use */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ +}; + +#define I40E_CLIENT_RESET_LEVEL_PF 1 +#define I40E_CLIENT_RESET_LEVEL_CORE 2 +#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1) + +struct i40e_ops { + /* setup_q_vector_list enables queues with a particular vector */ + int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_qvlist_info *qv_info); + + int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len); + + /* If the PE Engine is unresponsive, RDMA driver can request a reset. + * The level helps determine the level of reset being requested. + */ + void (*request_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 level); + + /* API for the RDMA driver to set certain VSI flags that control + * PE Engine. + */ + int (*update_vsi_ctxt)(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag); +}; + +struct i40e_client_ops { + /* Should be called from register_client() or whenever PF is ready + * to create a specific client instance. 
+ */ + int (*open)(struct i40e_info *ldev, struct i40e_client *client); + + /* Should be called when netdev is unavailable or when unregister + * call comes in. If the close is happenening due to a reset being + * triggered set the reset bit to true. + */ + void (*close)(struct i40e_info *ldev, struct i40e_client *client, + bool reset); + + /* called when some l2 managed parameters changes - mtu */ + void (*l2_param_change)(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params); + + int (*virtchnl_receive)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id, + u8 *msg, u16 len); + + /* called when a VF is reset by the PF */ + void (*vf_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); + + /* called when the number of VFs changes */ + void (*vf_enable)(struct i40e_info *ldev, + struct i40e_client *client, u32 num_vfs); + + /* returns true if VF is capable of specified offload */ + int (*vf_capable)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); +}; + +/* Client device */ +struct i40e_client_instance { + struct list_head list; + struct i40e_info lan_info; + struct i40e_client *client; + unsigned long state; + /* A count of all the in-progress calls to the client */ + atomic_t ref_cnt; +}; + +struct i40e_client { + struct list_head list; /* list of registered clients */ + char name[I40E_CLIENT_STR_LENGTH]; + struct i40e_client_version version; + unsigned long state; /* client state */ + atomic_t ref_cnt; /* Count of all the client devices of this kind */ + u32 flags; +#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) +#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) + enum i40e_client_type type; + struct i40e_client_ops *ops; /* client ops provided by the client */ +}; + +static inline bool i40e_client_is_registered(struct i40e_client *client) +{ + return test_bit(__I40E_CLIENT_REGISTERED, &client->state); +} + +/* used by clients */ +int i40e_register_client(struct i40e_client *client); +int i40e_unregister_client(struct i40e_client *client); + +#endif /* _I40E_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6a034ddac36a..4596294c2ab1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -55,19 +55,13 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_KX_X722: + case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: hw->mac.type = I40E_MAC_X722; break; - case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: - hw->mac.type = I40E_MAC_X722_VF; - break; - case I40E_DEV_ID_VF: - case I40E_DEV_ID_VF_HV: - hw->mac.type = I40E_MAC_VF; - break; default: hw->mac.type = I40E_MAC_GENERIC; break; @@ -1245,7 +1239,13 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - for (cnt = 0; cnt < grst_del + 10; cnt++) { + + /* It can take upto 15 secs for GRST steady state. + * Bump it to 16 secs max to be safe. + */ + grst_del = grst_del * 20; + + for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; @@ -1894,6 +1894,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, } /** + * i40e_aq_set_phy_debug + * @hw: pointer to the hw struct + * @cmd_flags: debug command flags + * @cmd_details: pointer to command details structure or NULL + * + * Reset the external PHY. + **/ +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_debug *cmd = + (struct i40e_aqc_set_phy_debug *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_debug); + + cmd->command_flags = cmd_flags; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -1958,12 +1984,19 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (set) + if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + flags |= I40E_AQC_SET_VSI_PROMISC_TX; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2039,6 +2072,37 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, } /** + * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + 
i40e_aqc_opc_set_vsi_promiscuous_modes); + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2283,8 +2347,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: true for default port VSI, false for control port - * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support * @veb_seid: pointer to where to put the resulting VEB SEID + * @enable_stats: true to turn on VEB stats * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink @@ -2292,8 +2356,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) **/ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *veb_seid, + bool default_port, u16 *veb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2320,8 +2384,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; - if (enable_l2_filtering) - veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + /* reverse logic here: set the bitflag to disable the stats */ + if (!enable_stats) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = cpu_to_le16(veb_flags); @@ -2410,6 +2475,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, (struct i40e_aqc_macvlan *)&desc.params.raw; i40e_status status; u16 buf_size; + int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; @@ -2423,12 +2489,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, cmd->seid[1] = 0; cmd->seid[2] = 0; + for (i = 0; i < count; i++) + if (is_multicast_ether_addr(mv_list[i].mac_addr)) + mv_list[i].flags |= + cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, - cmd_details); + cmd_details); return status; } @@ -2476,6 +2547,137 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, } /** + * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule + * @hw: pointer to the hw struct + * @opcode: AQ opcode for add or delete mirror rule + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @id: Destination VSI SEID or Rule ID + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add/Delete a mirror rule to a specific switch. 
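In the i40e_aq_add_macvlan() hunk above, multicast entries get the shared-MAC flag ORed into their per-entry flags before the list is handed to firmware (the real code does this on __le16 fields via cpu_to_le16()). A standalone restatement of that pre-pass, with a local element type, a local multicast test, and a placeholder flag value:

#include <stdint.h>
#include <stdio.h>

#define ADD_USE_SHARED_MAC 0x0008	/* placeholder bit, not the hardware define */

struct macvlan_elem {
	uint8_t  mac_addr[6];
	uint16_t flags;
};

/* a MAC address is multicast when the I/G bit of the first octet is set */
static int is_multicast(const uint8_t *mac)
{
	return mac[0] & 0x01;
}

/* the same pre-pass the driver does before sending the list to firmware */
static void tag_shared_multicast(struct macvlan_elem *list, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (is_multicast(list[i].mac_addr))
			list[i].flags |= ADD_USE_SHARED_MAC;
}

int main(void)
{
	struct macvlan_elem list[2] = {
		{ .mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },	/* multicast */
		{ .mac_addr = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 } },	/* unicast */
	};
	int i;

	tag_shared_multicast(list, 2);
	for (i = 0; i < 2; i++)
		printf("entry %d flags = 0x%04x\n", i, (unsigned int)list[i].flags);
	return 0;
}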
Mirror rules are supported for + * VEBs/VEPA elements only + **/ +static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule *cmd = + (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + buf_size = count * sizeof(*mr_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, opcode); + cmd->seid = cpu_to_le16(sw_seid); + cmd->rule_type = cpu_to_le16(rule_type & + I40E_AQC_MIRROR_RULE_TYPE_MASK); + cmd->num_entries = cpu_to_le16(count); + /* Dest VSI for add, rule_id for delete */ + cmd->destination = cpu_to_le16(id); + if (mr_list) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + } + + status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, + cmd_details); + if (!status || + hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { + if (rule_id) + *rule_id = le16_to_cpu(resp->rule_id); + if (rules_used) + *rules_used = le16_to_cpu(resp->mirror_rules_used); + if (rules_free) + *rules_free = le16_to_cpu(resp->mirror_rules_free); + } + return status; +} + +/** + * i40e_aq_add_mirrorrule - add a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @dest_vsi: SEID of VSI to which packets will be mirrored + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only + **/ +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || + rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, + rule_type, dest_vsi, count, mr_list, + cmd_details, rule_id, rules_used, rules_free); +} + +/** + * i40e_aq_delete_mirrorrule - delete a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @count: length of the list + * @rule_id: Rule ID that is returned in the receive desc as part of + * add_mirrorrule. + * @mr_list: list of mirrored VLAN IDs to be removed + * @cmd_details: pointer to command details structure or NULL + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Delete a mirror rule. 
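The add/delete wrappers built on i40e_mirrorrule_op() differ mainly in which arguments they require: add needs a SEID/VLAN list unless the rule mirrors all ingress or all egress traffic, while delete (continued just below) needs a valid rule ID except for VLAN rules, which are identified by the VLAN list itself. A compact restatement of those checks, with locally defined rule-type constants standing in for the I40E_AQC_MIRROR_RULE_TYPE_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative rule types, standing in for I40E_AQC_MIRROR_RULE_TYPE_* */
enum rule_type { RULE_ALL_INGRESS, RULE_ALL_EGRESS, RULE_VLAN, RULE_OTHER };

/* add: "mirror everything" rules carry no list, every other type needs one */
static bool add_args_ok(enum rule_type type, uint16_t count, const void *list)
{
	if (type == RULE_ALL_INGRESS || type == RULE_ALL_EGRESS)
		return true;
	return count != 0 && list != NULL;
}

/* delete: VLAN rules are identified by the VLAN list, all others by rule ID */
static bool delete_args_ok(enum rule_type type, uint16_t rule_id,
			   uint16_t count, const void *list)
{
	if (type == RULE_VLAN)
		return count != 0 && list != NULL;
	return rule_id != 0;
}

int main(void)
{
	uint16_t vlans[] = { 10, 20 };

	printf("add  all-ingress, no list : %d\n",
	       add_args_ok(RULE_ALL_INGRESS, 0, NULL));
	printf("add  other, no list       : %d\n",
	       add_args_ok(RULE_OTHER, 0, NULL));
	printf("del  vlan with list       : %d\n",
	       delete_args_ok(RULE_VLAN, 0, 2, vlans));
	printf("del  other without id     : %d\n",
	       delete_args_ok(RULE_OTHER, 0, 0, NULL));
	return 0;
}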
Mirror rules are supported for VEBs/VEPA elements only + **/ +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) +{ + /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ + if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + if (!rule_id) + return I40E_ERR_PARAM; + } else { + /* count and mr_list shall be valid for rule_type INGRESS VLAN + * mirroring. For other rule_type, count and rule_type should + * not matter. + */ + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, + rule_type, rule_id, count, mr_list, + cmd_details, NULL, rules_used, rules_free); +} + +/** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: VF id to send msg @@ -2765,35 +2967,6 @@ i40e_aq_erase_nvm_exit: return status; } -#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 -#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 -#define I40E_DEV_FUNC_CAP_NPAR 0x03 -#define I40E_DEV_FUNC_CAP_OS2BMC 0x04 -#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05 -#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12 -#define I40E_DEV_FUNC_CAP_VF 0x13 -#define I40E_DEV_FUNC_CAP_VMDQ 0x14 -#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15 -#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16 -#define I40E_DEV_FUNC_CAP_VSI 0x17 -#define I40E_DEV_FUNC_CAP_DCB 0x18 -#define I40E_DEV_FUNC_CAP_FCOE 0x21 -#define I40E_DEV_FUNC_CAP_ISCSI 0x22 -#define I40E_DEV_FUNC_CAP_RSS 0x40 -#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 -#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 -#define I40E_DEV_FUNC_CAP_MSIX 0x43 -#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 -#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 -#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 -#define I40E_DEV_FUNC_CAP_CEM 0xF2 -#define I40E_DEV_FUNC_CAP_IWARP 0x51 -#define I40E_DEV_FUNC_CAP_LED 0x61 -#define I40E_DEV_FUNC_CAP_SDP 0x62 -#define I40E_DEV_FUNC_CAP_MDIO 0x63 -#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 - /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct @@ -2832,79 +3005,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, major_rev = cap->major_rev; switch (id) { - case I40E_DEV_FUNC_CAP_SWITCH_MODE: + case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; break; - case I40E_DEV_FUNC_CAP_MGMT_MODE: + case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; break; - case I40E_DEV_FUNC_CAP_NPAR: + case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; break; - case I40E_DEV_FUNC_CAP_OS2BMC: + case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; break; - case I40E_DEV_FUNC_CAP_VALID_FUNC: + case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; break; - case I40E_DEV_FUNC_CAP_SRIOV_1_1: + case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = true; break; - case I40E_DEV_FUNC_CAP_VF: + case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; break; - case I40E_DEV_FUNC_CAP_VMDQ: + case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = true; break; - case I40E_DEV_FUNC_CAP_802_1_QBG: + case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = true; break; - case I40E_DEV_FUNC_CAP_802_1_QBH: + case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = true; break; - case I40E_DEV_FUNC_CAP_VSI: + case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; break; - case I40E_DEV_FUNC_CAP_DCB: + case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = true; 
p->enabled_tcmap = logical_id; p->maxtc = phys_id; } break; - case I40E_DEV_FUNC_CAP_FCOE: + case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = true; break; - case I40E_DEV_FUNC_CAP_ISCSI: + case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = true; break; - case I40E_DEV_FUNC_CAP_RSS: + case I40E_AQ_CAP_ID_RSS: p->rss = true; p->rss_table_size = number; p->rss_table_entry_width = logical_id; break; - case I40E_DEV_FUNC_CAP_RX_QUEUES: + case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_TX_QUEUES: + case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_MSIX: + case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; break; - case I40E_DEV_FUNC_CAP_MSIX_VF: + case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_FLEX10: + case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = true; @@ -2920,38 +3093,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->flex10_mode = logical_id; p->flex10_status = phys_id; break; - case I40E_DEV_FUNC_CAP_CEM: + case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = true; break; - case I40E_DEV_FUNC_CAP_IWARP: + case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = true; break; - case I40E_DEV_FUNC_CAP_LED: + case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_SDP: + case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_MDIO: + case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } break; - case I40E_DEV_FUNC_CAP_IEEE_1588: + case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = true; break; - case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR: + case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = true; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; - case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; @@ -3709,7 +3882,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, return ret; /* Read the PF Queue Filter control register */ - val = rd32(hw, I40E_PFQF_CTL_0); + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; @@ -3746,7 +3919,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; - wr32(hw, I40E_PFQF_CTL_0, val); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); return 0; } @@ -4073,3 +4246,454 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, return status; } + +/** + * i40e_read_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ +i40e_status i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 *value) +{ + i40e_status status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + 
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_read_end; + } + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_READ) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + + if (!status) { + command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); + *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> + I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + } else { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't read register value from external PHY.\n"); + } + +phy_read_end: + return status; +} + +/** + * i40e_write_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes value to specified PHY register + **/ +i40e_status i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 value) +{ + i40e_status status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_write_end; + } + + command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; + wr32(hw, I40E_GLGEN_MSRWD(port_num), command); + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_WRITE) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + +phy_write_end: + return status; +} + +/** + * i40e_get_phy_address + * @hw: pointer to the HW structure + * @dev_num: PHY port num that address we want + * @phy_addr: Returned PHY address + * + * Gets PHY address for current port + **/ +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) +{ + u8 port_num = hw->func_caps.mdio_port_num; + u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); + + return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; +} + +/** + * i40e_blink_phy_led + * @hw: pointer to the HW structure + * @time: time how long led will blinks in secs + * @interval: gap 
between LED on and off in msecs + * + * Blinks PHY link LED + **/ +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval) +{ + i40e_status status = 0; + u32 i; + u16 led_ctl; + u16 gpio_led_port; + u16 led_reg; + u16 led_addr = I40E_PHY_LED_PROV_REG_1; + u8 phy_addr = 0; + u8 port_num; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + led_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto phy_blinking_end; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto phy_blinking_end; + break; + } + } + + if (time > 0 && interval > 0) { + for (i = 0; i < time * 1000; i += interval) { + status = i40e_read_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + &led_reg); + if (status) + goto restore_config; + if (led_reg & I40E_PHY_LED_MANUAL_ON) + led_reg = 0; + else + led_reg = I40E_PHY_LED_MANUAL_ON; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto restore_config; + msleep(interval); + } + } + +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + +phy_blinking_end: + return status; +} + +/** + * i40e_led_get_phy - return current on/off mode + * @hw: pointer to the hw struct + * @led_addr: address of led register to use + * @val: original value of register to use + * + **/ +i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val) +{ + i40e_status status = 0; + u16 gpio_led_port; + u8 phy_addr = 0; + u16 reg_val; + u16 temp_addr; + u8 port_num; + u32 i; + + temp_addr = I40E_PHY_LED_PROV_REG_1; + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + temp_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + temp_addr, phy_addr, ®_val); + if (status) + return status; + *val = reg_val; + if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { + *led_addr = temp_addr; + break; + } + } + return status; +} + +/** + * i40e_led_set_phy + * @hw: pointer to the HW structure + * @on: true or false + * @mode: original val plus bit for set or ignore + * Set led's on or off when controlled by the PHY + * + **/ +i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode) +{ + i40e_status status = 0; + u16 led_ctl = 0; + u16 led_reg = 0; + u8 phy_addr = 0; + u8 port_num; + u32 i; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, &led_reg); + if (status) + return status; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + return status; + } + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto restore_config; + if (on) + led_reg = I40E_PHY_LED_MANUAL_ON; + else + led_reg = 0; + status = 
i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + goto restore_config; + if (mode & I40E_PHY_LED_MODE_ORIG) { + led_ctl = (mode & I40E_PHY_LED_MODE_MASK); + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_ctl); + } + return status; +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + return status; +} + +/** + * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == 0) + *reg_val = le32_to_cpu(cmd_resp->value); + + return status; +} + +/** + * i40e_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40e_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value = cpu_to_le32(reg_val); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == 
I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 2691277c0055..0fab3a9b51d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -380,17 +380,20 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; - u8 i, up, selector; + u8 i; typelength = ntohs(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) return; for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { if (app->prio_map & BIT(up)) @@ -400,13 +403,17 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); - if (selector == I40E_CEE_APP_SEL_ETHTYPE) + switch (selector) { + case I40E_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; - else if (selector == I40E_CEE_APP_SEL_TCPIP) + break; + case I40E_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; - else + break; + default: /* Keep selector as it is for unknown types */ dcbcfg->app[i].selector = selector; + } dcbcfg->app[i].protocolid = ntohs(app->protocol); /* Move to next app */ @@ -814,13 +821,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; - /* If Firmware version < v4.33 IEEE only */ - if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || - (hw->aq.fw_maj_ver < 4)) + /* If Firmware version < v4.33 on X710/XL710, IEEE only */ + if ((hw->mac.type == I40E_MAC_XL710) && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4))) return i40e_get_ieee_dcb_config(hw); - /* If Firmware version == v4.33 use old CEE struct */ - if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ + if ((hw->mac.type == I40E_MAC_XL710) && + ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (!ret) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 10744a698d6f..0c97733d253c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
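i40e_read_rx_ctl() and i40e_write_rx_ctl() above share one control flow: on firmware that supports it (AQ API 1.5 and later) the access goes through the admin queue, is retried a few times with a short sleep when firmware answers EAGAIN, and falls back to a direct register access when the AQ path fails or is unavailable. A generic, userspace restatement of that flow with invented stub accessors and return codes (the 1-2 ms sleep between retries is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum aq_rc { AQ_OK, AQ_EAGAIN, AQ_ERROR };	/* stand-ins for the AQ return codes */

/* stub: pretend the firmware is busy twice, then answers */
static enum aq_rc aq_read(uint32_t addr, uint32_t *val)
{
	static int busy = 2;

	(void)addr;
	if (busy-- > 0)
		return AQ_EAGAIN;
	*val = 0xdeadbeef;			/* fake register contents */
	return AQ_OK;
}

/* stub for the direct MMIO fallback */
static uint32_t mmio_read(uint32_t addr)
{
	(void)addr;
	return 0xcafef00d;
}

static uint32_t read_rx_ctl(uint32_t addr, bool fw_supports_aq)
{
	enum aq_rc rc = AQ_ERROR;
	uint32_t val = 0;
	int retry = 5;

	if (fw_supports_aq) {
		do {				/* the driver sleeps 1-2 ms here */
			rc = aq_read(addr, &val);
		} while (rc == AQ_EAGAIN && retry-- > 0);
	}

	/* old firmware, or the AQ path gave up: touch the register directly */
	if (!fw_supports_aq || rc != AQ_OK)
		val = mmio_read(addr);

	return val;
}

int main(void)
{
	printf("new firmware: 0x%08x\n", read_rx_ctl(0x100, true));
	printf("old firmware: 0x%08x\n", read_rx_ctl(0x100, false));
	return 0;
}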
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -61,257 +61,13 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { int i; - if ((seid < I40E_BASE_VEB_SEID) || - (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - else - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == seid) - return pf->veb[i]; + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == seid) + return pf->veb[i]; return NULL; } /************************************************************** - * dump - * The dump entry in debugfs is for getting a data snapshow of - * the driver's current configuration and runtime details. - * When the filesystem entry is written, a snapshot is taken. - * When the entry is read, the most recent snapshot data is dumped. - **************************************************************/ -static char *i40e_dbg_dump_buf; -static ssize_t i40e_dbg_dump_data_len; -static ssize_t i40e_dbg_dump_buffer_len; - -/** - * i40e_dbg_dump_read - read the dump data - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - int bytes_not_copied; - int len; - - /* is *ppos bigger than the available data? */ - if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) - return 0; - - /* be sure to not read beyond the end of available data */ - len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); - - bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied) - return -EFAULT; - - *ppos += len; - return len; -} - -/** - * i40e_dbg_prep_dump_buf - * @pf: the PF we're working with - * @buflen: the desired buffer length - * - * Return positive if success, 0 if failed - **/ -static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) -{ - /* if not already big enough, prep for re alloc */ - if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_buf = NULL; - } - - /* get a new buffer if needed */ - if (!i40e_dbg_dump_buf) { - i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); - if (i40e_dbg_dump_buf != NULL) - i40e_dbg_dump_buffer_len = buflen; - } - - return i40e_dbg_dump_buffer_len; -} - -/** - * i40e_dbg_dump_write - trigger a datadump snapshot - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - * - * Any write clears the stats - **/ -static ssize_t i40e_dbg_dump_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i40e_pf *pf = filp->private_data; - bool seid_found = false; - long seid = -1; - int buflen = 0; - int i, ret; - int len; - u8 *p; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - - /* decode the SEID given to be dumped */ - ret = kstrtol_from_user(buffer, count, 0, &seid); - - if (ret) { - dev_info(&pf->pdev->dev, "bad seid value\n"); - } else if (seid == 0) { - seid_found = true; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_data_len = 0; - i40e_dbg_dump_buf = NULL; - dev_info(&pf->pdev->dev, "debug buffer freed\n"); - - } else if (seid == pf->pf_seid || seid 
== 1) { - seid_found = true; - - buflen = sizeof(struct i40e_pf); - buflen += (sizeof(struct i40e_aq_desc) - * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - - len = sizeof(struct i40e_pf); - memcpy(p, pf, len); - p += len; - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_asq_entries); - memcpy(p, pf->hw.aq.asq.desc_buf.va, len); - p += len; - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_arq_entries); - memcpy(p, pf->hw.aq.arq.desc_buf.va, len); - p += len; - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "PF seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - } else if (seid >= I40E_BASE_VSI_SEID) { - struct i40e_vsi *vsi = NULL; - struct i40e_mac_filter *f; - int filter_count = 0; - - mutex_lock(&pf->switch_mutex); - vsi = i40e_dbg_find_vsi(pf, seid); - if (!vsi) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_vsi); - buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; - buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; - list_for_each_entry(f, &vsi->mac_filter_list, list) - filter_count++; - buflen += sizeof(struct i40e_mac_filter) * filter_count; - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - seid_found = true; - - len = sizeof(struct i40e_vsi); - memcpy(p, vsi, len); - p += len; - - if (vsi->num_q_vectors) { - len = (sizeof(struct i40e_q_vector) - * vsi->num_q_vectors); - memcpy(p, vsi->q_vectors, len); - p += len; - } - - if (vsi->num_queue_pairs) { - len = (sizeof(struct i40e_ring) * - vsi->num_queue_pairs); - memcpy(p, vsi->tx_rings, len); - p += len; - memcpy(p, vsi->rx_rings, len); - p += len; - } - - if (vsi->tx_rings[0]) { - len = sizeof(struct i40e_tx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->tx_rings[i]->tx_bi, len); - p += len; - } - len = sizeof(struct i40e_rx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->rx_rings[i]->rx_bi, len); - p += len; - } - } - - /* macvlan filter list */ - len = sizeof(struct i40e_mac_filter); - list_for_each_entry(f, &vsi->mac_filter_list, list) { - memcpy(p, f, len); - p += len; - } - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VSI seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } else if (seid >= I40E_BASE_VEB_SEID) { - struct i40e_veb *veb = NULL; - - mutex_lock(&pf->switch_mutex); - veb = i40e_dbg_find_veb(pf, seid); - if (!veb) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_veb); - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - seid_found = true; - memcpy(i40e_dbg_dump_buf, veb, buflen); - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VEB seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } - -write_exit: - if (!seid_found) - dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); - - return count; -} - -static const struct file_operations i40e_dbg_dump_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = i40e_dbg_dump_read, - .write = i40e_dbg_dump_write, -}; - -/************************************************************** * command * The command entry in debugfs is for giving the driver commands * to be executed - these 
may be for changing the internal switch @@ -379,19 +135,27 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); - if (vsi->netdev) - dev_info(&pf->pdev->dev, - " netdev: name = %s\n", - vsi->netdev->name); + if (vsi->netdev) { + struct net_device *nd = vsi->netdev; + + dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", + nd->name, nd->state, nd->flags); + dev_info(&pf->pdev->dev, " features = 0x%08lx\n", + (unsigned long int)nd->features); + dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", + (unsigned long int)nd->hw_features); + dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", + (unsigned long int)nd->vlan_features); + } if (vsi->active_vlans) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, - " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", - vsi->netdev_registered, - vsi->current_netdev_flags, vsi->state, vsi->flags); + " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->state, vsi->flags, + vsi->netdev_registered, vsi->current_netdev_flags); if (vsi == pf->vsi[pf->lan_vsi]) - dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.san_addr, pf->hw.mac.port_addr); @@ -511,7 +275,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->dtype); dev_info(&pf->pdev->dev, " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, rx_ring->hsplit, + i, ring_is_ps_enabled(rx_ring), rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); @@ -526,6 +290,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->rx_stats.alloc_page_failed, rx_ring->rx_stats.alloc_buff_failed); dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n", + i, + rx_ring->rx_stats.realloc_count, + rx_ring->rx_stats.page_reuse_count); + dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, (unsigned long int)rx_ring->dma); @@ -533,6 +302,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " rx_rings[%i]: vsi = %p, q_vector = %p\n", i, rx_ring->vsi, rx_ring->q_vector); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_itr_setting = %d (%s)\n", + i, rx_ring->rx_itr_setting, + ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); @@ -557,8 +330,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " tx_rings[%i]: dtype = %d\n", i, tx_ring->dtype); dev_info(&pf->pdev->dev, - " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, tx_ring->hsplit, + " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->ring_active); @@ -583,14 +356,15 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_itr_setting = %d (%s)\n", + i, tx_ring->tx_itr_setting, + ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? 
"dynamic" : "fixed"); } rcu_read_unlock(); dev_info(&pf->pdev->dev, - " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", - vsi->work_limit, vsi->rx_itr_setting, - ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed", - vsi->tx_itr_setting, - ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed"); + " work_limit = %d\n", + vsi->work_limit); dev_info(&pf->pdev->dev, " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); @@ -815,20 +589,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); @@ -843,20 +617,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); @@ -918,12 +692,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; - if ((seid < I40E_BASE_VEB_SEID) || - (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - return; - } - veb = i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); @@ -2202,11 +1970,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf) if (!pfile) goto create_failed; - pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_dump_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_netdev_ops_fops); if (!pfile) @@ -2227,9 +1990,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf) { debugfs_remove_recursive(pf->i40e_dbg_pf); pf->i40e_dbg_pf = NULL; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buf = NULL; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 448ef4c17efb..99257fcd1ef4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -39,13 +39,11 @@ 
#define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_KX_X722 0x37CE +#define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 29d5833e24a3..784b1659457a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -89,6 +89,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt), + I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), + I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, @@ -143,6 +146,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_oversize", stats.rx_oversize), I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("arq_overflows", arq_overflows), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), @@ -232,6 +236,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "flow-director-atr", "veb-stats", "packet-split", + "hw-atr-eviction", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -340,7 +345,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (pf->hw.mac.type == I40E_MAC_X722) { + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { ecmd->supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) @@ -411,6 +416,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, if (pf->hw.mac.type == I40E_MAC_X722) { ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; + } } } if (phy_types & I40E_CAP_PHY_TYPE_XAUI || @@ -996,16 +1005,19 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { - struct i40e_nvm_access *cmd; - int errno; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; + int errno = 0; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) - 
return -EINVAL; + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1089,27 +1101,25 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - struct i40e_nvm_access *cmd; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; int ret_val = 0; - int errno; + int errno = 0; u32 magic; /* normal ethtool set_eeprom is not supported */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic == magic) - return -EOPNOTSUPP; - + errno = -EOPNOTSUPP; /* check for NVMUpdate access method */ - if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) - return -EINVAL; - - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) - return -EBUSY; + else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1816,28 +1826,52 @@ static int i40e_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct i40e_netdev_priv *np = netdev_priv(netdev); + i40e_status ret = 0; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int blink_freq = 2; + u16 temp_status; switch (state) { case ETHTOOL_ID_ACTIVE: - pf->led_status = i40e_led_get(hw); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { + pf->led_status = i40e_led_get(hw); + } else { + i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL); + ret = i40e_led_get_phy(hw, &temp_status, + &pf->phy_led_val); + pf->led_status = temp_status; + } return blink_freq; case ETHTOOL_ID_ON: - i40e_led_set(hw, 0xF, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) + i40e_led_set(hw, 0xf, false); + else + ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - i40e_led_set(hw, 0x0, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) + i40e_led_set(hw, 0x0, false); + else + ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - i40e_led_set(hw, pf->led_status, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { + i40e_led_set(hw, false, pf->led_status); + } else { + ret = i40e_led_set_phy(hw, false, pf->led_status, + (pf->phy_led_val | + I40E_PHY_LED_MODE_ORIG)); + i40e_aq_set_phy_debug(hw, 0, NULL); + } break; default: break; } - - return 0; + if (ret) + return -ENOENT; + else + return 0; } /* NOTE: i40e hardware uses a conversion factor of 2 
for Interrupt @@ -1845,8 +1879,9 @@ static int i40e_set_phys_id(struct net_device *netdev, * 125us (8000 interrupts per second) == ITR(62) */ -static int i40e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int __i40e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1854,14 +1889,24 @@ static int i40e_get_coalesce(struct net_device *netdev, ec->tx_max_coalesced_frames_irq = vsi->work_limit; ec->rx_max_coalesced_frames_irq = vsi->work_limit; - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + /* rx and tx usecs has per queue value. If user doesn't specify the queue, + * return queue 0's value to represent. + */ + if (queue < 0) { + queue = 0; + } else if (queue >= vsi->num_queue_pairs) { + return -EINVAL; + } + + if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting)) ec->use_adaptive_rx_coalesce = 1; - if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting)) ec->use_adaptive_tx_coalesce = 1; - ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC; + /* we use the _usecs_high to store/set the interrupt rate limit * that the hardware supports, that almost but not quite * fits the original intent of the ethtool variable, @@ -1874,15 +1919,63 @@ static int i40e_get_coalesce(struct net_device *netdev, return 0; } -static int i40e_set_coalesce(struct net_device *netdev, +static int i40e_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { - struct i40e_netdev_priv *np = netdev_priv(netdev); + return __i40e_get_coalesce(netdev, ec, -1); +} + +static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40e_get_coalesce(netdev, ec, queue); +} + +static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; struct i40e_q_vector *q_vector; + u16 vector, intrl; + + intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + + vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; + vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; + + if (ec->use_adaptive_rx_coalesce) + vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + + if (ec->use_adaptive_tx_coalesce) + vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + + q_vector = vsi->rx_rings[queue]->q_vector; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting); + vector = vsi->base_vector + q_vector->v_idx; + wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); + + q_vector = vsi->tx_rings[queue]->q_vector; + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting); + vector = vsi->base_vector + q_vector->v_idx; + wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + + wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); + i40e_flush(hw); +} + +static int __i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = 
np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u16 vector; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) @@ -1899,57 +1992,53 @@ static int i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - vector = vsi->base_vector; - if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && - (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { - vsi->rx_itr_setting = ec->rx_coalesce_usecs; - } else if (ec->rx_coalesce_usecs == 0) { - vsi->rx_itr_setting = ec->rx_coalesce_usecs; + if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else { - netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; + } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; } vsi->int_rate_limit = ec->rx_coalesce_usecs_high; - if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && - (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { - vsi->tx_itr_setting = ec->tx_coalesce_usecs; - } else if (ec->tx_coalesce_usecs == 0) { - vsi->tx_itr_setting = ec->tx_coalesce_usecs; + if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); + } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + /* rx and tx usecs has per queue value. If user doesn't specify the queue, + * apply to all queues. 
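The per-queue coalesce code above (and continued in the next hunk) follows one convention: a negative queue index means "apply to every queue", anything else must be a valid ring index and touches only that ring, and a usecs value of 0 disables the timer while non-zero values must stay inside the ITR range. A standalone sketch of that dispatch and check, with a made-up queue count and only the 8160 us upper bound quoted by the driver's messages (the driver also enforces a lower bound):

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES	4		/* made-up ring count for the sketch */
#define ITR_MAX_USECS	8160		/* upper bound quoted by the driver messages */

static int rx_itr_usecs[NUM_QUEUES];

/* 0 disables the timer; otherwise the value must stay inside the ITR range */
static bool usecs_valid(int usecs)
{
	return usecs == 0 || usecs <= ITR_MAX_USECS;
}

static int set_rx_coalesce(int queue, int usecs)
{
	int i;

	if (!usecs_valid(usecs))
		return -1;

	if (queue < 0) {			/* plain ethtool -C: apply everywhere */
		for (i = 0; i < NUM_QUEUES; i++)
			rx_itr_usecs[i] = usecs;
	} else if (queue < NUM_QUEUES) {	/* per-queue variant: just this ring */
		rx_itr_usecs[queue] = usecs;
	} else {
		return -1;			/* queue index out of range */
	}
	return 0;
}

int main(void)
{
	int i;

	set_rx_coalesce(-1, 50);		/* all queues to 50 us */
	set_rx_coalesce(2, 200);		/* queue 2 only */
	for (i = 0; i < NUM_QUEUES; i++)
		printf("queue %d: %d us\n", i, rx_itr_usecs[i]);
	return 0;
}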
+ */ + if (queue < 0) { + for (i = 0; i < vsi->num_queue_pairs; i++) + i40e_set_itr_per_queue(vsi, ec, i); + } else if (queue < vsi->num_queue_pairs) { + i40e_set_itr_per_queue(vsi, ec, queue); } else { - netif_info(pf, drv, netdev, - "Invalid value, tx-usecs range is 0-8160\n"); + netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + vsi->num_queue_pairs - 1); return -EINVAL; } - if (ec->use_adaptive_rx_coalesce) - vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; - else - vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; - - if (ec->use_adaptive_tx_coalesce) - vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; - else - vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + return 0; +} - q_vector = vsi->q_vectors[i]; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); - wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); - wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); - wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); - i40e_flush(hw); - } +static int i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __i40e_set_coalesce(netdev, ec, -1); +} - return 0; +static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40e_set_coalesce(netdev, ec, queue); } /** @@ -2147,8 +2236,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &pf->hw; - u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | - ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | + ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports @@ -2166,9 +2255,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: @@ -2178,9 +2270,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: @@ -2190,10 +2285,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | 
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; @@ -2204,10 +2302,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; @@ -2245,8 +2346,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } - wr32(hw, I40E_PFQF_HENA(0), (u32)hena); - wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); /* Save setting for future output/update */ @@ -2712,6 +2813,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_VEB_STATS : 0; ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? I40E_PRIV_FLAGS_PS : 0; + ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? + 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; return ret_flags; } @@ -2763,10 +2866,21 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; } - if (flags & I40E_PRIV_FLAGS_VEB_STATS) + if ((flags & I40E_PRIV_FLAGS_VEB_STATS) && + !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; - else + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + reset_required = true; + } + + if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && + (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) + pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + else + pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; /* if needed, issue reset to cause things to take effect */ if (reset_required) @@ -2812,6 +2926,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, .set_priv_flags = i40e_set_priv_flags, + .get_per_queue_coalesce = i40e_get_per_queue_coalesce, + .set_per_queue_coalesce = i40e_set_per_queue_coalesce, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 579a46ca82df..8ad162c16f61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -295,11 +295,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) } /* enable FCoE hash filter */ - val = rd32(hw, I40E_PFQF_HENA(1)); + val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)); val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; - wr32(hw, I40E_PFQF_HENA(1), val); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val); /* enable flag */ pf->flags |= I40E_FLAG_FCOE_ENABLED; @@ -317,11 +317,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; /* Setup max frame with FCoE_MTU plus L2 overheads */ - val = rd32(hw, I40E_GLFCOE_RCTL); + val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL); val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; 
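[Note] The coalesce rework above splits the ethtool handlers into __i40e_get_coalesce()/__i40e_set_coalesce() taking a queue index, where a negative index keeps the legacy whole-device behaviour, and the new .get_per_queue_coalesce/.set_per_queue_coalesce ops pass a real queue number through. A minimal sketch of that dispatch shape, assuming a hypothetical example_adapter with per-queue ITR arrays (the example_* names are placeholders, not i40e internals; the ethtool op signatures are the standard ones):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    struct example_adapter {
            int num_queues;
            u32 *rx_itr;        /* one entry per queue */
            u32 *tx_itr;
    };

    static void example_apply_itr(struct example_adapter *ad,
                                  const struct ethtool_coalesce *ec, int queue)
    {
            ad->rx_itr[queue] = ec->rx_coalesce_usecs;
            ad->tx_itr[queue] = ec->tx_coalesce_usecs;
    }

    /* queue < 0 is the legacy "ethtool -C" path: fan out to every queue */
    static int __example_set_coalesce(struct net_device *dev,
                                      struct ethtool_coalesce *ec, int queue)
    {
            struct example_adapter *ad = netdev_priv(dev);
            int i;

            if (queue < 0) {
                    for (i = 0; i < ad->num_queues; i++)
                            example_apply_itr(ad, ec, i);
                    return 0;
            }
            if (queue >= ad->num_queues)
                    return -EINVAL;
            example_apply_itr(ad, ec, queue);
            return 0;
    }

    static int example_set_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
    {
            return __example_set_coalesce(dev, ec, -1);
    }

    static int example_set_per_queue_coalesce(struct net_device *dev, u32 queue,
                                              struct ethtool_coalesce *ec)
    {
            return __example_set_coalesce(dev, ec, queue);
    }

    static const struct ethtool_ops example_ethtool_ops = {
            .set_coalesce           = example_set_coalesce,
            .set_per_queue_coalesce = example_set_per_queue_coalesce,
    };

From user space this is driven with an ethtool new enough to know the per-queue sub-commands, roughly "ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 125" (exact option spelling may vary by ethtool version).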
val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); - wr32(hw, I40E_GLFCOE_RCTL, val); + i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val); dev_info(&pf->pdev->dev, "FCoE is supported.\n"); } @@ -1359,16 +1359,32 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; struct i40e_tx_buffer *first; u32 tx_flags = 0; + int fso, count; u8 hdr_len = 0; u8 sof = 0; u8 eof = 0; - int fso; if (i40e_fcoe_set_skb_header(skb)) goto out_drop; - if (!i40e_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) @@ -1457,7 +1473,7 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8f3b53e0dc46..67006431726a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,11 +28,6 @@ #include <linux/of_net.h> #include <linux/pci.h> -#ifdef CONFIG_SPARC -#include <asm/idprom.h> -#include <asm/prom.h> -#endif - /* Local includes */ #include "i40e.h" #include "i40e_diag.h" @@ -51,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 8 +#define DRV_VERSION_BUILD 25 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -90,6 +85,8 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, @@ -110,6 +107,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40e_wq; + /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -290,12 +289,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) * * If not already scheduled, this puts the task into the work queue **/ -static void i40e_service_event_schedule(struct i40e_pf *pf) +void i40e_service_event_schedule(struct i40e_pf *pf) { if (!test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) - schedule_work(&pf->service_task); + queue_work(i40e_wq, &pf->service_task); } /** @@ -769,7 +768,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_FCOE) return; - idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; + idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET; fs = &vsi->fcoe_stats; ofs = &vsi->fcoe_stats_offsets; @@ -820,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + u64 tx_lost_interrupt; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; @@ -845,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_lost_interrupt = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -863,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_lost_interrupt += p->tx_stats.tx_lost_interrupt; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -881,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_lost_interrupt = tx_lost_interrupt; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1368,7 +1371,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, f->changed = true; INIT_LIST_HEAD(&f->list); - list_add(&f->list, &vsi->mac_filter_list); + list_add_tail(&f->list, &vsi->mac_filter_list); } /* increment counter and add a new flag if needed */ @@ -1538,7 +1541,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ether_addr_copy(netdev->dev_addr, addr->sa_data); - return i40e_sync_vsi_filters(vsi); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -1762,6 +1769,11 @@ bottom_of_search_loop: vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + 
i40e_service_event_schedule(vsi->back); } /** @@ -1933,7 +1945,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kzalloc(del_list_size, GFP_KERNEL); + del_list = kzalloc(del_list_size, GFP_ATOMIC); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -2011,7 +2023,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); - add_list = kzalloc(add_list_size, GFP_KERNEL); + add_list = kzalloc(add_list_size, GFP_ATOMIC); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); @@ -2110,7 +2122,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { + if ((vsi->type == I40E_VSI_MAIN) && + (pf->lan_veb != I40E_NO_VEB) && + !(pf->flags & I40E_FLAG_MFP_ENABLED)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic @@ -2160,6 +2174,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } out: + /* if something went wrong then set the changed flag so we try again */ + if (retval) + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + clear_bit(__I40E_CONFIG_BUSY, &vsi->state); return retval; } @@ -2212,7 +2230,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); - + i40e_notify_client_of_l2_param_changes(vsi); return 0; } @@ -3106,11 +3124,11 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) struct i40e_q_vector *q_vector = vsi->q_vectors[i]; q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); @@ -3202,10 +3220,10 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) /* set the ITR configuration */ q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); @@ -3245,14 +3263,15 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure + * @clearpba: true when all pending interrupt events should be cleared **/ -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) { struct i40e_hw *hw = &pf->hw; u32 val; 
val = I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); @@ -3260,22 +3279,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) } /** - * i40e_irq_dynamic_disable - Disable default interrupt generation settings - * @vsi: pointer to a vsi - * @vector: disable a particular Hw Interrupt vector - **/ -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 val; - - val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - i40e_flush(hw); -} - -/** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector @@ -3400,7 +3403,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); } i40e_flush(&pf->hw); @@ -3459,16 +3462,12 @@ static irqreturn_t i40e_intr(int irq, void *data) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; - /* temporarily disable queue cause for NAPI processing */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); - - qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_RQCTL(0), qval); - - qval = rd32(hw, I40E_QINT_TQCTL(0)); - qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - + /* We do not have a way to disarm Queue causes while leaving + * interrupt enabled for all other causes, ideally + * interrupt should be disabled while we are in NAPI but + * this is not a performance path and napi_schedule() + * can deal with rescheduling. 
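[Note] Two related interrupt-path changes meet here: i40e_irq_dynamic_enable_icr0() gains a clearpba flag so the re-enable done from the hot interrupt path can skip clearing the pending-bit array, and the legacy (non-MSI-X) handler stops masking the per-queue causes before scheduling NAPI, relying on napi_schedule() to absorb duplicate scheduling. A rough sketch of the simplified legacy path, with example_* names standing in for the driver's real containers:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    #define EXAMPLE_STATE_DOWN 0

    struct example_q_vector {
            struct napi_struct napi;
            unsigned long *adapter_state;   /* adapter's state bitmap */
    };

    static irqreturn_t example_legacy_intr(int irq, void *data)
    {
            struct example_q_vector *qv = data;

            /* No per-queue cause masking: if NAPI is already scheduled or
             * running, napi_schedule_irqoff() simply does not schedule it
             * again, so a second interrupt before the poll completes is
             * harmless on this non-performance path.
             */
            if (!test_bit(EXAMPLE_STATE_DOWN, qv->adapter_state))
                    napi_schedule_irqoff(&qv->napi);

            return IRQ_HANDLED;
    }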
+ */ if (!test_bit(__I40E_DOWN, &pf->state)) napi_schedule_irqoff(&q_vector->napi); } @@ -3476,6 +3475,7 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { @@ -3546,7 +3546,7 @@ enable_intr: wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_service_event_schedule(pf); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); } return ret; @@ -3750,7 +3750,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) #ifdef CONFIG_NET_POLL_CONTROLLER /** - * i40e_netpoll - A Polling 'interrupt'handler + * i40e_netpoll - A Polling 'interrupt' handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable @@ -3929,6 +3929,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) else rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); @@ -4166,6 +4169,9 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) free_irq(pf->msix_entries[0].vector, pf); } + i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, + I40E_IWARP_IRQ_PILE_ID); + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) @@ -4209,12 +4215,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) **/ static void i40e_vsi_close(struct i40e_vsi *vsi) { + bool reset = false; + if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + reset = true; + i40e_notify_client_of_netdev_close(vsi, reset); } /** @@ -4287,12 +4298,12 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) #ifdef CONFIG_I40E_DCB /** - * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * - * This function waits for the given VSI's Tx queues to be disabled. + * This function waits for the given VSI's queues to be disabled. **/ -static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; @@ -4309,24 +4320,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) } } + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_rxq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d disable timeout\n", + vsi->seid, pf_q); + return ret; + } + } + return 0; } /** - * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF * - * This function waits for the Tx queues to be in disabled state for all the + * This function waits for the queues to be in disabled state for all the * VSIs that are managed by this PF. 
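[Note] The rename from *_wait_txq_disabled to *_wait_queues_disabled reflects that both Tx and Rx rings are now polled for the disabled state before the DCB reconfiguration continues. The Rx side presumably follows the same poll-with-timeout idiom as the existing Tx wait; a generic sketch under that assumption (the retry count, delay window and helper name are illustrative; I40E_QRX_ENA() appears in the hunk above and the QENA_STAT mask is assumed from i40e_register.h):

    /* builds inside the i40e driver tree for rd32() and register defines */
    #include "i40e.h"
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define EXAMPLE_QUEUE_WAIT_RETRY 100

    static int example_rxq_wait_disabled(struct i40e_hw *hw, int pf_q)
    {
            int i;

            for (i = 0; i < EXAMPLE_QUEUE_WAIT_RETRY; i++) {
                    u32 reg = rd32(hw, I40E_QRX_ENA(pf_q));

                    /* the status bit clears once the queue has drained */
                    if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
                            return 0;
                    usleep_range(10, 20);
            }
            return -ETIMEDOUT;
    }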
**/ -static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { /* No need to wait for FCoE VSI queues */ if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { - ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) break; } @@ -4352,7 +4375,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct i40e_pf *pf; - u32 head, val, tx_pending; + u32 head, val, tx_pending_hw; int i; pf = vsi->back; @@ -4378,16 +4401,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); - /* Bail out if interrupts are disabled because napi_poll - * execution in-progress or will get scheduled soon. - * napi_poll cleans TX and RX queues and updates 'next_to_clean'. - */ - if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) - return; - head = i40e_get_head(tx_ring); - tx_pending = i40e_get_tx_pending(tx_ring); + tx_pending_hw = i40e_get_tx_pending(tx_ring, false); /* HW is done executing descriptors, updated HEAD write back, * but SW hasn't processed those descriptors. If interrupt is @@ -4395,12 +4411,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) * dev_watchdog detecting timeout on those netdev_queue, * hence proactively trigger SW interrupt. */ - if (tx_pending) { + if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { /* NAPI Poll didn't run and clear since it was set */ if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &tx_ring->q_vector->hung_detected)) { - netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", - vsi->seid, q_idx, tx_pending, + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending_hw, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail)); @@ -4413,6 +4429,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) &tx_ring->q_vector->hung_detected); } } + + /* This is the case where we have interrupts missing, + * so the tx_pending in HW will most likely be 0, but we + * will have tx_pending in SW since the WB happened but the + * interrupt got lost. 
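[Note] The hung-queue detector now distinguishes "hardware still has work pending" from "hardware finished but the completion interrupt never arrived": when the head write-back shows nothing outstanding while the software next_to_clean still lags and the vector is unarmed, the driver kicks NAPI and bumps tx_lost_interrupt. Condensed into one helper for readability (the packaging is illustrative; the calls and fields are the ones used in the diff):

    static void example_recover_lost_tx_irq(struct i40e_ring *tx_ring, u32 dyn_ctl)
    {
            bool armed = dyn_ctl & I40E_PFINT_DYN_CTLN_INTENA_MASK;

            if (!i40e_get_tx_pending(tx_ring, false) &&  /* HW head caught up    */
                i40e_get_tx_pending(tx_ring, true) &&    /* SW view still behind */
                !armed) {
                    if (napi_reschedule(&tx_ring->q_vector->napi))
                            tx_ring->tx_stats.tx_lost_interrupt++;
            }
    }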
+ */ + if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && + (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + if (napi_reschedule(&tx_ring->q_vector->napi)) + tx_ring->tx_stats.tx_lost_interrupt++; + } } /** @@ -4831,6 +4858,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.info = vsi->info; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; + } + /* Update the VSI after updating the VSI queue-mapping information */ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { @@ -4974,6 +5007,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } + i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } } @@ -5016,8 +5050,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) + if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) goto out; /* Get the initial DCB configuration */ @@ -5173,6 +5206,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi) } i40e_fdir_filter_restore(vsi); } + + /* On the next run of the service_task, notify any clients of the new + * opened netdev + */ + pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; i40e_service_event_schedule(pf); return 0; @@ -5249,11 +5287,7 @@ void i40e_down(struct i40e_vsi *vsi) * @netdev: net device to configure * @tc: number of traffic classes to enable **/ -#ifdef I40E_FCOE -int i40e_setup_tc(struct net_device *netdev, u8 tc) -#else static int i40e_setup_tc(struct net_device *netdev, u8 tc) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5306,6 +5340,19 @@ exit: return ret; } +#ifdef I40E_FCOE +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#else +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#endif +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + return i40e_setup_tc(netdev, tc->tc); +} + /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure @@ -5348,9 +5395,12 @@ int i40e_open(struct net_device *netdev) vxlan_get_rx_port(netdev); #endif #ifdef CONFIG_I40E_GENEVE - geneve_get_rx_port(netdev); + if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) + geneve_get_rx_port(netdev); #endif + i40e_notify_client_of_netdev_open(vsi); + return 0; } @@ -5713,8 +5763,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, if (ret) goto exit; - /* Wait for the PF's Tx queues to be disabled */ - ret = i40e_pf_wait_txq_disabled(pf); + /* Wait for the PF's queues to be disabled */ + ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); @@ -6015,6 +6065,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: + case I40E_VSI_IWARP: case I40E_VSI_MIRROR: default: /* there is no notification for other VSIs */ @@ -6244,6 +6295,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error 
detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; + pf->arq_overflows++; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) @@ -6319,7 +6371,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: - i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, + "ARQ NVM operation 0x%04x completed\n", + opcode); break; default: dev_info(&pf->pdev->dev, @@ -6803,12 +6857,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. */ ret = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), @@ -6889,8 +6943,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -7079,12 +7132,13 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_info(&pf->pdev->dev, - "%s vxlan port %d, index %d failed, err %s aq_err %s\n", - port ? "add" : "delete", - ntohs(port), i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, + dev_dbg(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + pf->udp_ports[i].type ? "vxlan" : "geneve", + port ? "add" : "delete", + ntohs(port), i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->udp_ports[i].index = 0; } @@ -7111,11 +7165,13 @@ static void i40e_service_task(struct work_struct *work) } i40e_detect_recover_hung(pf); + i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); + i40e_client_subtask(pf); i40e_sync_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf); i40e_clean_adminq_subtask(pf); @@ -7290,8 +7346,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) set_bit(__I40E_DOWN, &vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; - vsi->rx_itr_setting = pf->rx_itr_default; - vsi->tx_itr_setting = pf->tx_itr_default; vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 
pf->rss_table_size : 64; @@ -7458,8 +7512,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->dcb_tc = 0; if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; - if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; + tx_ring->tx_itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7476,6 +7529,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) set_ring_16byte_desc_enabled(rx_ring); else clear_ring_16byte_desc_enabled(rx_ring); + rx_ring->rx_itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = rx_ring; } @@ -7520,6 +7574,7 @@ static int i40e_init_msix(struct i40e_pf *pf) int vectors_left; int v_budget, i; int v_actual; + int iwarp_requested = 0; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; @@ -7533,6 +7588,7 @@ static int i40e_init_msix(struct i40e_pf *pf) * is governed by number of cpus in the system. * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs + * - The CPU count within the NUMA node if iWARP is enabled #ifdef I40E_FCOE * - The number of FCOE qps. #endif @@ -7579,6 +7635,16 @@ static int i40e_init_msix(struct i40e_pf *pf) } #endif + /* can we reserve enough for iWARP? */ + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + if (!vectors_left) + pf->num_iwarp_msix = 0; + else if (vectors_left < pf->num_iwarp_msix) + pf->num_iwarp_msix = 1; + v_budget += pf->num_iwarp_msix; + vectors_left -= pf->num_iwarp_msix; + } + /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; @@ -7613,6 +7679,8 @@ static int i40e_init_msix(struct i40e_pf *pf) * of these features based on the policy and at the end disable * the features that did not get any vectors. 
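[Note] The i40e_init_msix() rework threads an iWARP reservation through the same vectors_left accounting that already covers the misc vector, LAN queues, FCoE and VMDq, then degrades feature by feature when the kernel grants fewer vectors than requested. The arithmetic reduces to a waterfall of min() clamps, roughly as sketched below (all names and the one-vector misc assumption are illustrative):

    #include <linux/kernel.h>       /* min() */

    struct example_msix_budget {
            int lan;
            int iwarp;
            int vmdq;
    };

    static void example_budget_msix(int vectors_granted, int lan_want,
                                    int iwarp_want, int vmdq_want,
                                    struct example_msix_budget *out)
    {
            int left = vectors_granted - 1;   /* one vector for misc/admin causes */

            out->lan = min(lan_want, left);
            left -= out->lan;

            out->iwarp = min(iwarp_want, left);  /* 0 means iWARP gets disabled */
            left -= out->iwarp;

            out->vmdq = min(vmdq_want, left);    /* the remainder goes to VMDq */
    }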
*/ + iwarp_requested = pf->num_iwarp_msix; + pf->num_iwarp_msix = 0; #ifdef I40E_FCOE pf->num_fcoe_qps = 0; pf->num_fcoe_msix = 0; @@ -7651,17 +7719,33 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_msix = 1; break; case 3: + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + pf->num_lan_msix = 1; + pf->num_iwarp_msix = 1; + } else { + pf->num_lan_msix = 2; + } #ifdef I40E_FCOE /* give one vector to FCoE */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { pf->num_lan_msix = 1; pf->num_fcoe_msix = 1; } -#else - pf->num_lan_msix = 2; #endif break; default: + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + pf->num_iwarp_msix = min_t(int, (vec / 3), + iwarp_requested); + pf->num_vmdq_vsis = min_t(int, (vec / 3), + I40E_DEFAULT_NUM_VMDQ_VSI); + } else { + pf->num_vmdq_vsis = min_t(int, (vec / 2), + I40E_DEFAULT_NUM_VMDQ_VSI); + } + pf->num_lan_msix = min_t(int, + (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), + pf->num_lan_msix); #ifdef I40E_FCOE /* give one vector to FCoE */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { @@ -7669,8 +7753,6 @@ static int i40e_init_msix(struct i40e_pf *pf) vec--; } #endif - /* give the rest to the PF */ - pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); break; } } @@ -7680,6 +7762,12 @@ static int i40e_init_msix(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; } + + if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + (pf->num_iwarp_msix == 0)) { + dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); + pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + } #ifdef I40E_FCOE if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { @@ -7771,6 +7859,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) vectors = i40e_init_msix(pf); if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | + I40E_FLAG_IWARP_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif @@ -7852,7 +7941,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_flush(hw); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); return err; } @@ -7936,6 +8025,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) } /** + * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: Buffter to store the hash keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Return 0 on success, negative on failure + */ +static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret = 0; + + if (seed) { + ret = i40e_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ? 
true : false; + + ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + return ret; +} + +/** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed @@ -7956,7 +8091,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } if (lut) { @@ -7993,7 +8128,7 @@ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); + seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } if (lut) { u32 *lut_dw = (u32 *)lut; @@ -8037,7 +8172,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { - return i40e_get_rss_reg(vsi, seed, lut, lut_size); + struct i40e_pf *pf = vsi->back; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** @@ -8071,19 +8211,19 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ - hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | - ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | + ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= i40e_pf_get_default_rss_hena(pf); - wr32(hw, I40E_PFQF_HENA(0), (u32)hena); - wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Determine the RSS table size based on the hardware capabilities */ - reg_val = rd32(hw, I40E_PFQF_CTL_0); + reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? 
(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); - wr32(hw, I40E_PFQF_CTL_0, reg_val); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ if (!vsi->rss_size) @@ -8367,12 +8507,38 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4))) { + pf->flags |= I40E_FLAG_RESTART_AUTONEG; + /* No DCB support for FW < v4.33 */ + pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; + } + + /* Disable FW LLDP if FW < v4.3 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || + (pf->hw.aq.fw_maj_ver < 4))) + pf->flags |= I40E_FLAG_STOP_FW_LLDP; + + /* Use the FW Set LLDP MIB API if FW > v4.40 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || + (pf->hw.aq.fw_maj_ver >= 5))) + pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; + if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } + if (pf->hw.func_caps.iwarp) { + pf->flags |= I40E_FLAG_IWARP_ENABLED; + /* IWARP needs one extra vector for CQP just like MISC.*/ + pf->num_iwarp_msix = (int)num_online_cpus() + 1; + } + #ifdef I40E_FCOE i40e_init_pf_fcoe(pf); @@ -8393,8 +8559,19 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_100M_SGMII_CAPABLE | + I40E_FLAG_USE_SET_LLDP_MIB | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + } else { + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -8530,9 +8707,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, u8 next_idx; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8572,9 +8746,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev, struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8608,7 +8779,7 @@ static void i40e_add_geneve_port(struct net_device *netdev, u8 next_idx; u8 idx; - if (sa_family == AF_INET6) + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; idx = i40e_get_udp_port_idx(pf, port); @@ -8652,7 +8823,7 @@ static void i40e_del_geneve_port(struct net_device *netdev, struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; idx = i40e_get_udp_port_idx(pf, port); @@ -8890,7 +9061,7 @@ static const struct net_device_ops i40e_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef I40E_FCOE .ndo_fcoe_enable = i40e_fcoe_enable, .ndo_fcoe_disable = i40e_fcoe_disable, @@ -8942,11 +9113,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= 
NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + 0; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -8967,6 +9142,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->features |= NETIF_F_NTUPLE; + if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; @@ -9216,6 +9393,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } + if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + ctxt.info.queueing_opt_flags |= + I40E_AQ_VSI_QUE_OPT_TCP_ENA; + } + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; if (pf->vf[vsi->vf_id].spoofchk) { @@ -9239,6 +9423,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) break; #endif /* I40E_FCOE */ + case I40E_VSI_IWARP: + /* send down message to iWARP */ + break; + default: return -ENODEV; } @@ -9471,10 +9659,15 @@ vector_setup_out: **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { - struct i40e_pf *pf = vsi->back; + struct i40e_pf *pf; u8 enabled_tc; int ret; + if (!vsi) + return NULL; + + pf = vsi->back; + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear_rings(vsi); @@ -9975,13 +10168,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool is_default = veb->pf->cur_promisc; - bool is_cloud = false; + bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, - is_cloud, &veb->seid, NULL); + &veb->seid, enable_stats, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@ -10350,6 +10543,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | + I40E_FLAG_IWARP_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif @@ -10367,6 +10561,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | + I40E_FLAG_IWARP_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif @@ -10538,21 +10733,9 @@ static void i40e_print_features(struct i40e_pf *pf) **/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { - struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; - u8 *mac_addr = pf->hw.mac.addr; - pf->flags &= ~I40E_FLAG_PF_MAC; - addr = of_get_mac_address(dp); - if (addr) { - ether_addr_copy(mac_addr, addr); + if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) pf->flags |= I40E_FLAG_PF_MAC; -#ifdef CONFIG_SPARC - } else { - ether_addr_copy(mac_addr, idprom->id_ethaddr); - pf->flags |= I40E_FLAG_PF_MAC; -#endif /* CONFIG_SPARC */ - } } /** @@ -10575,7 +10758,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 wol_nvm_bits; u16 link_status; int err; - u32 len; u32 val; u32 i; u8 
set_fc_aq_fail; @@ -10758,8 +10940,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } @@ -10834,8 +11015,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ - len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; - pf->vsi = kzalloc(len, GFP_KERNEL); + pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), + GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; @@ -10882,12 +11063,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. */ err = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), @@ -10904,8 +11085,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -10939,8 +11119,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, &pf->state)) { - u32 val; - /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; @@ -10959,7 +11137,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #endif /* CONFIG_PCI_IOV */ - pfs_found++; + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, + pf->num_iwarp_msix, + I40E_IWARP_IRQ_PILE_ID); + if (pf->iwarp_base_vector < 0) { + dev_info(&pdev->dev, + "failed to get tracking for %d vectors for IWARP err=%d\n", + pf->num_iwarp_msix, pf->iwarp_base_vector); + pf->flags &= ~I40E_FLAG_IWARP_ENABLED; + } + } i40e_dbg_pf_init(pf); @@ -10970,6 +11158,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); + /* add this PF to client device list and launch a client service task */ + err = i40e_lan_add_device(pf); + if (err) + dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", + err); + #ifdef I40E_FCOE /* create FCoE interface */ i40e_fcoe_vsi_setup(pf); @@ -11051,6 +11245,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); + if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || + (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) + pf->flags |= 
I40E_FLAG_HAVE_10GBASET_PHY; + /* print a string summarizing features */ i40e_print_features(pf); @@ -11107,10 +11305,11 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); /* Disable RSS in hw */ - wr32(hw, I40E_PFQF_HENA(0), 0); - wr32(hw, I40E_PFQF_HENA(1), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ + set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); @@ -11140,9 +11339,16 @@ static void i40e_remove(struct pci_dev *pdev) if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); + /* remove attached clients */ + ret_code = i40e_lan_del_device(pf); + if (ret_code) { + dev_warn(&pdev->dev, "Failed to delete client device: %d\n", + ret_code); + } + /* shutdown and destroy the HMC */ - if (pf->hw.hmc.hmc_obj) { - ret_code = i40e_shutdown_lan_hmc(&pf->hw); + if (hw->hmc.hmc_obj) { + ret_code = i40e_shutdown_lan_hmc(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", @@ -11150,7 +11356,7 @@ static void i40e_remove(struct pci_dev *pdev) } /* shutdown the adminq */ - ret_code = i40e_shutdown_adminq(&pf->hw); + ret_code = i40e_shutdown_adminq(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the Admin Queue resources: %d\n", @@ -11178,7 +11384,7 @@ static void i40e_remove(struct pci_dev *pdev) kfree(pf->qp_pile); kfree(pf->vsi); - iounmap(pf->hw.hw_addr); + iounmap(hw->hw_addr); kfree(pf); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -11413,6 +11619,16 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + /* we will see if single thread per module is enough for now, + * it can't be any worse than using the system workqueue which + * was already single threaded + */ + i40e_wq = create_singlethread_workqueue(i40e_driver_name); + if (!i40e_wq) { + pr_err("%s: Failed to create workqueue\n", i40e_driver_name); + return -ENOMEM; + } + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } @@ -11427,6 +11643,7 @@ module_init(i40e_init_module); static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); + destroy_workqueue(i40e_wq); i40e_dbg_exit(); } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 6100cdd9ad13..5730f8091e1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -693,10 +693,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done); + hw->aq.nvm_release_on_done, + cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { *perrno = -EFAULT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index bb9d583e5416..d51eee5bf79a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ 
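[Note] Stepping back to the i40e_main.c module init/exit changes above: the service task moves off the system workqueue onto a driver-private single-threaded workqueue created at module load and destroyed at unload, with i40e_service_event_schedule() calling queue_work() on it instead of schedule_work(). The lifecycle in isolation looks roughly like this (example_* names are placeholders; the workqueue calls are the standard kernel API):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_service_task;

    static void example_service_fn(struct work_struct *work)
    {
            /* periodic driver housekeeping would run here */
    }

    static int __init example_init(void)
    {
            example_wq = create_singlethread_workqueue("example_drv");
            if (!example_wq)
                    return -ENOMEM;

            INIT_WORK(&example_service_task, example_service_fn);
            /* instead of schedule_work(&example_service_task): */
            queue_work(example_wq, &example_service_task);
            return 0;
    }

    static void __exit example_exit(void)
    {
            cancel_work_sync(&example_service_task);
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");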
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -74,6 +74,12 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); +i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode); +i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); /* admin send queue commands */ @@ -127,6 +133,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -135,8 +144,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *pveb_seid, + bool default_port, u16 *pveb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, @@ -149,6 +158,15 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); + i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); @@ -324,4 +342,19 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +u8 
i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index dc0402fe3370..86ca27f72f02 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -2045,6 +2045,14 @@ #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GL_PRS_FVBM_MAX_INDEX 3 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 +#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) @@ -2216,6 +2224,14 @@ #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_MSK_MAX_INDEX 63 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 @@ -5155,6 +5171,38 @@ #define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_FD_MSK_MAX_INDEX 1 +#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 +#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) +#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) +#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 +#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 +#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) +#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 +#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 +#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, 
I40E_GLQF_HASH_MSK_MASK_SHIFT) +#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_ORT_MAX_INDEX 63 +#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 +#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) +#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 +#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) +#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 +#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ +#define I40E_GLQF_PIT_MAX_INDEX 23 +#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) +#define I40E_GLQF_PIT_FSIZE_SHIFT 5 +#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) +#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 +#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 #define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 47bd8b3145a7..084d0ab316b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. 
*/ - j = i40e_get_tx_pending(tx_ring); + j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && @@ -774,29 +778,48 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK; + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), val); - q_vector->arm_wb_state = true; - } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + } else { + val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ + + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } + q_vector->arm_wb_state = true; +} + +/** + * i40e_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | @@ -1041,7 +1064,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -1176,16 +1199,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1193,56 +1219,79 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. 
+ */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1251,7 +1300,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1259,8 +1308,10 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -1278,6 +1329,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -1289,9 +1342,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** @@ -1326,16 +1389,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); 
- bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -1351,12 +1405,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1380,37 +1432,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it but the specification states that you "MAY validate", it + * doesn't make it a hard requirement so if we have validated the + * inner checksum report CHECKSUM_UNNECESSARY. */ - if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && - (ipv4_tunnel)) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -1475,18 +1507,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; if (budget <= 0) return 0; @@ -1497,7 +1530,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1515,6 +1550,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); I40E_RX_INCREMENT(rx_ring, i); @@ -1523,10 +1564,13 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -1534,8 +1578,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } @@ -1553,9 +1597,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1566,38 +1617,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? 
- skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. + */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); @@ -1656,7 +1719,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } /** @@ -1674,6 +1737,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1684,7 +1748,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1783,7 +1849,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? 
budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -1791,7 +1857,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); @@ -1814,6 +1882,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, bool rx = false, tx = false; u32 rxval, txval; int vector; + int idx = q_vector->v_idx; vector = (q_vector->v_idx + vsi->base_vector); @@ -1823,17 +1892,17 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); if (q_vector->itr_countdown > 0 || - (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && - !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) { goto enable_int; } - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) { rx = i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } - if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) { tx = i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } @@ -1906,7 +1975,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1930,7 +2000,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ @@ -1938,7 +2008,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } @@ -1951,20 +2021,7 @@ tx_only: if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { i40e_update_enable_itr(vsi, q_vector); } else { /* Legacy mode */ - struct i40e_hw *hw = &vsi->back->hw; - /* We re-enable the queue 0 cause, but - * don't worry about dynamic_enable - * because we left it on for the other - * possible interrupts during napi - */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) | - I40E_QINT_RQCTL_CAUSE_ENA_MASK; - - wr32(hw, I40E_QINT_RQCTL(0), qval); - qval = rd32(hw, I40E_QINT_TQCTL(0)) | - I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - i40e_irq_dynamic_enable_icr0(vsi->back); + i40e_irq_dynamic_enable_icr0(vsi->back, false); } return 0; } @@ -1974,10 +2031,9 @@ tx_only: * @tx_ring: ring to add programming descriptor to * @skb: send buffer * @tx_flags: send tx flags - * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) + u32 tx_flags) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -1989,6 +2045,7 @@ 
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; + int l4_proto; u16 i; /* make sure ATR is enabled */ @@ -2002,36 +2059,28 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; + /* Currently only IPv4/IPv6 with TCP is supported */ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + /* snag network header to get L4 type and address */ + hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? + skb_inner_network_header(skb) : skb_network_header(skb); - /* Currently only IPv4/IPv6 with TCP is supported - * access ihl as u8 to avoid unaligned access on ia64 - */ - if (tx_flags & I40E_TX_FLAGS_IPV4) - hlen = (hdr.network[0] & 0x0F) << 2; - else if (protocol == htons(ETH_P_IPV6)) - hlen = sizeof(struct ipv6hdr); - else - return; + /* Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. + */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + /* access ihl as u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + l4_proto = hdr.ipv4->protocol; } else { - hdr.network = skb_inner_network_header(skb); - hlen = skb_inner_network_header_len(skb); + hlen = hdr.network - skb->data; + l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); + hlen -= hdr.network - skb->data; } - /* Currently only IPv4/IPv6 with TCP is supported - * Note: tx_flags gets modified to reflect inner protocols in - * tx_enable_csum function if encap is enabled. - */ - if ((tx_flags & I40E_TX_FLAGS_IPV4) && - (hdr.ipv4->protocol != IPPROTO_TCP)) - return; - else if ((tx_flags & I40E_TX_FLAGS_IPV6) && - (hdr.ipv6->nexthdr != IPPROTO_TCP)) + if (l4_proto != IPPROTO_TCP) return; th = (struct tcphdr *)(hdr.network + hlen); @@ -2039,7 +2088,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ @@ -2067,7 +2117,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; - flex_ptype |= (protocol == htons(ETH_P_IP)) ? + flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? 
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << @@ -2101,7 +2151,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -2206,13 +2257,23 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct tcphdr *tcph; - struct iphdr *iph; - u32 l4len; + u64 cd_cmd, cd_tso_len, cd_mss; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -2220,35 +2281,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - - if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? 
(skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } @@ -2303,129 +2389,154 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; - struct udphdr *oudph = NULL; - struct iphdr *oiph = NULL; - u32 l4_tunnel = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + u32 offset, cmd = 0, tunnel = 0; + __be16 frag_off; + u8 l4_proto = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: - l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; default: - return; - } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; + return -1; + + skb_checksum_help(skb); + return 0; } - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; - } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } + + *td_cmd |= cmd; + *td_offset |= offset; + + return 1; } /** @@ -2466,7 +2577,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * * Returns -EBUSY if a stop is needed, else 0 **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ @@ -2483,77 +2594,71 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -#ifdef I40E_FCOE -inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#else -static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#endif -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - -/** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * __i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer - * @tx_flags: collected send information * * Note: Our HW can't scatter-gather more than 8 fragments to build 
* a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +bool __i40e_chk_linearize(struct sk_buff *skb) { - struct skb_frag_struct *frag; - bool linearize = false; - unsigned int size = 0; - u16 num_frags; - u16 gso_segs; + const struct skb_frag_struct *frag, *stale; + int gso_size, nr_frags, sum; - num_frags = skb_shinfo(skb)->nr_frags; - gso_segs = skb_shinfo(skb)->gso_segs; + /* check to see if TSO is enabled, if so we may get a repreive */ + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(!gso_size)) + return true; - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 0; + /* no need to check if number of frags is less than 8 */ + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < I40E_MAX_BUFFER_TXD) + return false; - if (num_frags < (I40E_MAX_BUFFER_TXD)) - goto linearize_chk_done; - /* try the simple math, if we have too many frags per segment */ - if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > - I40E_MAX_BUFFER_TXD) { - linearize = true; - goto linearize_chk_done; - } - frag = &skb_shinfo(skb)->frags[0]; - /* we might still have more fragments per segment */ - do { - size += skb_frag_size(frag); - frag++; j++; - if ((size >= skb_shinfo(skb)->gso_size) && - (j < I40E_MAX_BUFFER_TXD)) { - size = (size % skb_shinfo(skb)->gso_size); - j = (size) ? 1 : 0; - } - if (j == I40E_MAX_BUFFER_TXD) { - linearize = true; - break; - } - num_frags--; - } while (num_frags); - } else { - if (num_frags >= I40E_MAX_BUFFER_TXD) - linearize = true; + /* We need to walk through the list and validate that each group + * of 6 fragments totals at least gso_size. However we don't need + * to perform such validation on the first or last 6 since the first + * 6 cannot inherit any data from a descriptor before them, and the + * last 6 cannot inherit any data from a descriptor after them. + */ + nr_frags -= I40E_MAX_BUFFER_TXD - 1; + frag = &skb_shinfo(skb)->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We + * use this as the worst case scenerio in which the frag ahead + * of us only provides one byte which is why we are limited to 6 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - gso_size; + + /* Add size of frags 1 through 5 to create our initial sum */ + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. + */ + stale = &skb_shinfo(skb)->frags[0]; + for (;;) { + sum += skb_frag_size(++frag); + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + /* use pre-decrement to avoid processing last fragment */ + if (!--nr_frags) + break; + + sum -= skb_frag_size(++stale); } -linearize_chk_done: - return linearize; + return false; } /** @@ -2760,43 +2865,6 @@ dma_error: } /** - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed - * @skb: send buffer - * @tx_ring: ring to send buffer on - * - * Returns number of data descriptors needed for this skb. Returns 0 to indicate - * there is not enough descriptors available in this ring since we need at least - * one descriptor. 
- **/ -#ifdef I40E_FCOE -inline int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#else -static inline int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#endif -{ - unsigned int f; - int count = 0; - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return 0; - } - return count; -} - -/** * i40e_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on @@ -2814,14 +2882,30 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; + int tso, count; int tsyn; - int tso; /* prefetch the data, we'll need it later */ prefetch(skb->data); - if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) @@ -2846,29 +2930,22 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; + /* Always offload the checksum, since it's in the data descriptor */ + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags)) { - if (skb_linearize(skb)) - goto out_drop; - tx_ring->tx_stats.tx_linearize++; - } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; - /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); @@ -2876,7 +2953,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, * * NOTE: this must always be directly before the data descriptor. 
*/ - i40e_atr(tx_ring, skb, tx_flags, protocol); + i40e_atr(tx_ring, skb, tx_flags); i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3f081e25e097..cdd5dc00aec5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) @@ -203,12 +202,15 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { @@ -246,6 +248,14 @@ struct i40e_ring { u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; + u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 rx_hdr_len; @@ -254,7 +264,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -275,7 +284,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) #define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) /* stats structs */ @@ -316,8 +324,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); @@ -331,13 +339,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset); -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); -int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40e_get_tx_pending(struct i40e_ring *ring); +u32 
i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); +int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +bool __i40e_chk_linearize(struct sk_buff *skb); /** * i40e_get_head - Retrieve head from head writeback @@ -352,4 +360,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } + +/** + * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. + **/ +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + int count = 0, size = skb_headlen(skb); + + for (;;) { + count += TXD_USE_COUNT(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/** + * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @count: number of buffers used + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +{ + /* we can only support up to 8 data buffers for a single send */ + if (likely(count <= I40E_MAX_BUFFER_TXD)) + return false; + + return __i40e_chk_linearize(skb); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index dd2da356d9a1..3335f9d13374 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -78,7 +78,7 @@ enum i40e_debug_mask { I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, - + I40E_DEBUG_IWARP = 0x00F00000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, @@ -90,6 +90,22 @@ enum i40e_debug_mask { I40E_DEBUG_ALL = 0xFFFFFFFF }; +#define I40E_MDIO_STCODE 0 +#define I40E_MDIO_OPCODE_ADDRESS 0 +#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) + +#define I40E_PHY_COM_REG_PAGE 0x1E +#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 +#define I40E_PHY_LED_MANUAL_ON 0x100 +#define I40E_PHY_LED_PROV_REG_1 0xC430 +#define I40E_PHY_LED_MODE_MASK 0xFFFF +#define I40E_PHY_LED_MODE_ORIG 0x80000000 + /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. 
This gives us the @@ -144,6 +160,7 @@ enum i40e_vsi_type { I40E_VSI_MIRROR = 5, I40E_VSI_SRIOV = 6, I40E_VSI_FDIR = 7, + I40E_VSI_IWARP = 8, I40E_VSI_TYPE_UNKNOWN }; @@ -1098,6 +1115,10 @@ enum i40e_filter_program_desc_pcmd { I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 3226946bf3d4..ab866cf3dc18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -81,6 +81,9 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_IWARP = 20, + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, + I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, }; /* Virtual channel message descriptor. This overlays the admin queue @@ -348,6 +351,37 @@ struct i40e_virtchnl_pf_event { int severity; }; +/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. +*/ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct i40e_virtchnl_iwarp_qv_info qv_info[1]; +}; + /* VF reset states - these are written into the RSTAT register: * I40E_VFGEN_RSTAT1 on the PF * I40E_VFGEN_RSTAT on the VF diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 63e62f9aec6e..816c6bbf7093 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -352,6 +352,136 @@ irq_list_done: } /** + * i40e_release_iwarp_qvlist + * @vf: pointer to the VF. 
+ * + **/ +static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; + u32 msix_vf; + u32 i; + + if (!vf->qvlist_info) + return; + + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + for (i = 0; i < qvlist_info->num_vectors; i++) { + struct i40e_virtchnl_iwarp_qv_info *qv_info; + u32 next_q_index, next_q_type; + struct i40e_hw *hw = &pf->hw; + u32 v_idx, reg_idx, reg; + + qv_info = &qvlist_info->qv_info[i]; + if (!qv_info) + continue; + v_idx = qv_info->v_idx; + if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { + /* Figure out the queue after CEQ and make that the + * first queue. + */ + reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; + reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); + next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) + >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; + next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) + >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; + + reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); + reg = (next_q_index & + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | + (next_q_type << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); + + wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); + } + } + kfree(vf->qvlist_info); + vf->qvlist_info = NULL; +} + +/** + * i40e_config_iwarp_qvlist + * @vf: pointer to the VF info + * @qvlist_info: queue and vector list + * + * Return 0 on success or < 0 on error + **/ +static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, + struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_virtchnl_iwarp_qv_info *qv_info; + u32 v_idx, i, reg_idx, reg; + u32 next_q_idx, next_q_type; + u32 msix_vf, size; + + size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + + (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + (qvlist_info->num_vectors - 1)); + vf->qvlist_info = kzalloc(size, GFP_KERNEL); + vf->qvlist_info->num_vectors = qvlist_info->num_vectors; + + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + for (i = 0; i < qvlist_info->num_vectors; i++) { + qv_info = &qvlist_info->qv_info[i]; + if (!qv_info) + continue; + v_idx = qv_info->v_idx; + + /* Validate vector id belongs to this vf */ + if (!i40e_vc_isvalid_vector_id(vf, v_idx)) + goto err; + + vf->qvlist_info->qv_info[i] = *qv_info; + + reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); + /* We might be sharing the interrupt, so get the first queue + * index and type, push it down the list by adding the new + * queue on top. Also link it with the new queue in CEQCTL. 
+ */ + reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); + next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); + next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); + + if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { + reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; + reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | + (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | + (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | + (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | + (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); + wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); + + reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); + reg = (qv_info->ceq_idx & + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | + (I40E_QUEUE_TYPE_PE_CEQ << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); + wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); + } + + if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { + reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | + (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | + (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); + + wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); + } + } + + return 0; +err: + kfree(vf->qvlist_info); + vf->qvlist_info = NULL; + return -EINVAL; +} + +/** * i40e_config_vsi_tx_queue * @vf: pointer to the VF info * @vsi_id: id of VSI as provided by the FW @@ -461,7 +591,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; /* set splitalways mode 10b */ - rx_ctx.dtype = 0x2; + rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } /* databuffer length validation */ @@ -602,8 +732,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) * that VF queues be mapped using this method, even when they are * contiguous in real life */ - wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), - I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* enable VF vplan_qtable mappings */ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; @@ -630,7 +760,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) (j * 2) + 1); reg |= qid << 16; } - wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); + i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), + reg); } i40e_flush(hw); @@ -849,9 +980,11 @@ complete_reset: /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); if (!i40e_alloc_vf_res(vf)) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + i40e_notify_client_of_vf_reset(pf, abs_vf_id); } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -876,11 +1009,7 @@ void i40e_free_vfs(struct i40e_pf *pf) while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) usleep_range(1000, 2000); - for (i = 0; i < pf->num_alloc_vfs; i++) - if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) - i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], - false); - + i40e_notify_client_of_vf_enable(pf, 0); for (i = 0; i < pf->num_alloc_vfs; i++) if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], @@ -952,6 +1081,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) goto err_iov; } } + i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); /* allocate memory */ vfs = kcalloc(num_alloc_vfs, sizeof(struct 
i40e_vf), GFP_KERNEL); if (!vfs) { @@ -980,7 +1110,7 @@ err_alloc: i40e_free_vfs(pf); err_iov: /* Re-enable interrupt 0. */ - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); return ret; } @@ -1205,6 +1335,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + + if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) && + (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; + set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); + } + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) vfres->vf_offload_flags |= @@ -1213,9 +1350,21 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + } + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + } + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -1814,6 +1963,72 @@ error_param: } /** + * i40e_vc_iwarp_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF for the iwarp msgs + **/ +static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_pf *pf = vf->pf; + int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, + msg, msglen); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP, + aq_ret); +} + +/** + * i40e_vc_iwarp_qvmap_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * @config: config qvmap or release it + * + * called from the VF for the iwarp msgs + **/ +static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, + bool config) +{ + struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = + (struct i40e_virtchnl_iwarp_qvlist_info *)msg; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (config) { + if (i40e_config_iwarp_qvlist(vf, qvlist_info)) + aq_ret = I40E_ERR_PARAM; + } else { + i40e_release_iwarp_qvlist(vf); + } + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, + config ? 
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP : + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, + aq_ret); +} + +/** * i40e_vc_validate_vf_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1908,6 +2123,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, case I40E_VIRTCHNL_OP_GET_STATS: valid_len = sizeof(struct i40e_virtchnl_queue_select); break; + case I40E_VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. + */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + valid_len = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_iwarp_qvlist_info *qv = + (struct i40e_virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct i40e_virtchnl_iwarp_qv_info)); + } + break; /* These are always errors coming from the VF. */ case I40E_VIRTCHNL_OP_EVENT: case I40E_VIRTCHNL_OP_UNKNOWN: @@ -1997,6 +2238,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, case I40E_VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg, msglen); break; + case I40E_VIRTCHNL_OP_IWARP: + ret = i40e_vc_iwarp_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); + break; + case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); + break; case I40E_VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", @@ -2025,7 +2275,11 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; - /* re-enable vflr interrupt cause */ + /* Re-enable the VFLR interrupt cause here, before looking for which + * VF got reset. Otherwise, if another VF gets a reset while the + * first one is being processed, that interrupt will be lost, and + * that VF will be stuck in reset forever. + */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); @@ -2186,6 +2440,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, * and then reloading the VF driver. */ i40e_vc_disable_vf(pf, vf); + /* During reset the VF got a new VSI, so refresh the pointer. 
*/ + vsi = pf->vsi[vf->lan_vsi_idx]; } /* Check for condition where there was already a port VLAN ID @@ -2294,6 +2550,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, case I40E_LINK_SPEED_40GB: speed = 40000; break; + case I40E_LINK_SPEED_20GB: + speed = 20000; + break; case I40E_LINK_SPEED_10GB: speed = 10000; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index da44995def42..e7b2fba0309e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -58,6 +58,7 @@ enum i40e_queue_ctrl { enum i40e_vf_states { I40E_VF_STAT_INIT = 0, I40E_VF_STAT_ACTIVE, + I40E_VF_STAT_IWARPENA, I40E_VF_STAT_FCOEENA, I40E_VF_STAT_DISABLED, }; @@ -66,6 +67,7 @@ enum i40e_vf_states { enum i40e_vf_capabilities { I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, I40E_VIRTCHNL_VF_CAP_L2, + I40E_VIRTCHNL_VF_CAP_IWARP, }; /* VF information structure */ @@ -91,8 +93,8 @@ struct i40e_vf { * When assigned, these will be non-zero, because VSI 0 is always * the main LAN VSI for the PF. */ - u8 lan_vsi_idx; /* index into PF struct */ - u8 lan_vsi_id; /* ID as used by firmware */ + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_id; /* ID as used by firmware */ u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ @@ -106,6 +108,8 @@ struct i40e_vf { bool link_forced; bool link_up; /* only valid if VF link is forced */ bool spoofchk; + /* RDMA Client */ + struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info; }; void i40e_free_vfs(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 3f65e39b3fe4..44f7ed7583dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -887,6 +887,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index f5b2b369dc7c..aad8d6277110 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; @@ -145,6 +145,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -220,6 +223,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -228,6 +232,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -399,6 +404,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -419,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -677,6 +684,31 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -903,7 +935,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; @@ -970,6 +1003,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define 
I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1066,6 +1100,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; @@ -1254,10 +1289,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; @@ -1752,7 +1793,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); @@ -1820,6 +1866,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1909,6 +1967,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2083,6 +2157,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 938783e0baac..771ac6ad8cda 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -904,6 +904,131 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { }; /** + * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure 
or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val) + return I40E_ERR_PARAM; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == 0) + *reg_val = le32_to_cpu(cmd_resp->value); + + return status; +} + +/** + * i40evf_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, + &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40evf_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value = cpu_to_le32(reg_val); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40evf_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} + +/** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index cbd9a1b078ab..d89d52109efa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -103,4 +103,19 @@ i40e_status 
i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7a00657dacda..ebcc25c05796 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) /** * i40evf_get_tx_pending - how many Tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring) +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -252,6 +256,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + unsigned int j = 0; + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. 
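An aside on the i40evf_get_tx_pending() change above: the new in_sw flag only selects where the head value comes from (the head write-back area or the driver's own next_to_clean); the pending count itself is plain circular-ring distance from head to tail. A minimal userspace sketch of that arithmetic, with the ring size and positions invented for illustration:

#include <assert.h>
#include <stdio.h>

/* Distance from head to tail on a circular descriptor ring of 'count'
 * entries; mirrors the head/tail arithmetic in i40evf_get_tx_pending().
 */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	assert(tx_pending(10, 10, 512) == 0);	/* ring empty */
	assert(tx_pending(10, 14, 512) == 4);	/* no wrap */
	assert(tx_pending(510, 2, 512) == 4);	/* tail wrapped past the end */
	printf("tx_pending checks pass\n");
	return 0;
}

The write-back arming check that follows uses this count (with in_sw set to false) to see whether only a handful of descriptors are still outstanding before forcing a write-back.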
+ */ + j = i40evf_get_tx_pending(tx_ring, false); + + if (budget && + ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), total_packets, total_bytes); @@ -276,39 +296,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK; + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), - val); - q_vector->arm_wb_state = true; - } else { - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), val); - } + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), val); + q_vector->arm_wb_state = true; +} + +/** + * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), + val); } /** @@ -506,7 +536,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -641,16 +671,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -658,56 +691,79 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, 
u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. + */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -716,7 +772,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -724,8 +780,10 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -743,6 +801,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -754,9 +814,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation 
failure + */ + return true; } /** @@ -791,16 +861,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -816,12 +877,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -845,36 +904,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it but the specification states that you "MAY validate", it + * doesn't make it a hard requirement so if we have validated the + * inner checksum report CHECKSUM_UNNECESSARY. */ - if (ipv4_tunnel) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - (skb->len - - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -939,18 +979,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; do { struct i40e_rx_buffer *rx_bi; @@ -958,7 +999,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -976,13 +1019,22 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -990,8 +1042,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } @@ -1009,9 +1061,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1022,38 +1081,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? 
- skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. + */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); @@ -1105,7 +1176,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } /** @@ -1123,6 +1194,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1133,7 +1205,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1214,7 +1288,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? 
budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -1222,7 +1296,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); @@ -1335,7 +1411,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1359,7 +1436,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ @@ -1367,7 +1444,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40evf_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } @@ -1447,13 +1524,23 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct tcphdr *tcph; - struct iphdr *iph; - u32 l4len; + u64 cd_cmd, cd_tso_len, cd_mss; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -1461,35 +1548,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - - if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); - ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? (skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } @@ -1499,129 +1611,157 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; - u32 l4_tunnel = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + u32 offset, cmd = 0, tunnel = 0; + __be16 frag_off; + u8 l4_proto = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* 
define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; + case IPPROTO_GRE: + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: - return; - } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; - } + return -1; - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; - *tx_flags |= I40E_TX_FLAGS_IPV6; + skb_checksum_help(skb); + return 0; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + *tx_flags |= I40E_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } + + *td_cmd |= cmd; + *td_offset |= offset; + + return 1; } /** @@ -1656,59 +1796,71 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, } /** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer - * @tx_flags: collected send information * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. 
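The reworked i40e_tx_enable_csum() above derives td_offset purely from pointer differences: the MAC header length goes in as 2-byte words, while the IP and L4 header lengths go in as 4-byte dwords (matching the old "words in MACLEN + dwords in IPLEN + dwords in L4Len" comment it replaces). A rough standalone sketch of that packing; the shift positions and masks here are placeholders for the real I40E_TX_DESC_LENGTH_* definitions in i40e_type.h:

#include <stdio.h>

/* Placeholder field positions -- the real I40E_TX_DESC_LENGTH_* shifts
 * live in i40e_type.h; only the units of each field matter here.
 */
#define MACLEN_SHIFT	0	/* MAC header length, in 2-byte words */
#define IPLEN_SHIFT	7	/* IP header length, in 4-byte dwords */
#define L4LEN_SHIFT	14	/* L4 header length, in 4-byte dwords */

static unsigned int pack_td_offset(unsigned int maclen, unsigned int iplen,
				   unsigned int l4len)
{
	return ((maclen / 2) << MACLEN_SHIFT) |
	       ((iplen / 4) << IPLEN_SHIFT) |
	       ((l4len / 4) << L4LEN_SHIFT);
}

int main(void)
{
	/* Ethernet (14 bytes) + IPv4 without options (20) + TCP without
	 * options (20): 7 words, 5 dwords, 5 dwords.
	 */
	unsigned int off = pack_td_offset(14, 20, 20);

	printf("maclen=%u iplen=%u l4len=%u\n",
	       (off >> MACLEN_SHIFT) & 0x7f,
	       (off >> IPLEN_SHIFT) & 0x7f,
	       (off >> L4LEN_SHIFT) & 0xf);
	return 0;
}

The tunnel path above uses the same pointer-difference idea for the context descriptor: the outer IP header length is stored in dwords and the tunnel/NAT header length in 2-byte words.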
**/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +bool __i40evf_chk_linearize(struct sk_buff *skb) { - struct skb_frag_struct *frag; - bool linearize = false; - unsigned int size = 0; - u16 num_frags; - u16 gso_segs; + const struct skb_frag_struct *frag, *stale; + int gso_size, nr_frags, sum; - num_frags = skb_shinfo(skb)->nr_frags; - gso_segs = skb_shinfo(skb)->gso_segs; + /* check to see if TSO is enabled, if so we may get a repreive */ + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(!gso_size)) + return true; - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 0; + /* no need to check if number of frags is less than 8 */ + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < I40E_MAX_BUFFER_TXD) + return false; - if (num_frags < (I40E_MAX_BUFFER_TXD)) - goto linearize_chk_done; - /* try the simple math, if we have too many frags per segment */ - if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > - I40E_MAX_BUFFER_TXD) { - linearize = true; - goto linearize_chk_done; - } - frag = &skb_shinfo(skb)->frags[0]; - /* we might still have more fragments per segment */ - do { - size += skb_frag_size(frag); - frag++; j++; - if ((size >= skb_shinfo(skb)->gso_size) && - (j < I40E_MAX_BUFFER_TXD)) { - size = (size % skb_shinfo(skb)->gso_size); - j = (size) ? 1 : 0; - } - if (j == I40E_MAX_BUFFER_TXD) { - linearize = true; - break; - } - num_frags--; - } while (num_frags); - } else { - if (num_frags >= I40E_MAX_BUFFER_TXD) - linearize = true; + /* We need to walk through the list and validate that each group + * of 6 fragments totals at least gso_size. However we don't need + * to perform such validation on the first or last 6 since the first + * 6 cannot inherit any data from a descriptor before them, and the + * last 6 cannot inherit any data from a descriptor after them. + */ + nr_frags -= I40E_MAX_BUFFER_TXD - 1; + frag = &skb_shinfo(skb)->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We + * use this as the worst case scenerio in which the frag ahead + * of us only provides one byte which is why we are limited to 6 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - gso_size; + + /* Add size of frags 1 through 5 to create our initial sum */ + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. 
+ */ + stale = &skb_shinfo(skb)->frags[0]; + for (;;) { + sum += skb_frag_size(++frag); + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + /* use pre-decrement to avoid processing last fragment */ + if (!--nr_frags) + break; + + sum -= skb_frag_size(++stale); } -linearize_chk_done: - return linearize; + return false; } /** @@ -1718,7 +1870,7 @@ linearize_chk_done: * * Returns -EBUSY if a stop is needed, else 0 **/ -static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ @@ -1735,20 +1887,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40evf_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40evf_maybe_stop_tx(tx_ring, size); -} - -/** * i40evf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer @@ -1863,7 +2001,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), first->bytecount); - i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); /* Algorithm to optimize tail and RS bit setting: * if xmit_more is supported @@ -1946,38 +2084,6 @@ dma_error: } /** - * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed - * @skb: send buffer - * @tx_ring: ring to send buffer on - * - * Returns number of data descriptors needed for this skb. Returns 0 to indicate - * there is not enough descriptors available in this ring since we need at least - * one descriptor. 
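The rewritten __i40evf_chk_linearize() above replaces the per-segment modulo walk with a sliding-window sum: the hardware can chain at most 8 buffers per wire frame, and since the header and the previous fragment already consume two descriptors, every group of 6 consecutive fragments has to supply at least gso_size bytes, assuming the fragment ahead of the window contributes only a single byte in the worst case. A simplified array-based sketch of the same test (userspace C, fragment sizes invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD	8	/* HW limit: 8 data buffers per wire frame */

/* TSO case only -- a non-TSO skb that needs more than 8 buffers is simply
 * linearized. Mirrors the sliding-window sum added above, but over a plain
 * array of fragment sizes.
 */
static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
{
	int sum, i, stale = 0;

	if (nr_frags < MAX_BUFFER_TXD)
		return false;

	/* worst case: the fragment ahead of the window provides one byte */
	sum = 1 - gso_size;

	/* prime the window with fragments 1..5 */
	for (i = 1; i <= 5; i++)
		sum += frag[i];

	/* slide the window; the last fragment never starts a new segment */
	for (i = 6; ; i++) {
		sum += frag[i];
		if (sum < 0)
			return true;
		if (i == nr_frags - 2)
			break;
		sum -= frag[++stale];
	}

	return false;
}

int main(void)
{
	/* gso_size 1448: the second layout starves the window and must be
	 * linearized, the first one is fine.
	 */
	int ok[9]  = { 256, 512, 512, 512, 512, 512, 512, 512, 512 };
	int bad[9] = { 256,  64,  64,  64,  64,  64,  64,  64,  64 };

	printf("ok needs linearize:  %d\n", needs_linearize(ok, 9, 1448));
	printf("bad needs linearize: %d\n", needs_linearize(bad, 9, 1448));
	return 0;
}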
- **/ -static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -{ - unsigned int f; - int count = 0; - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return 0; - } - return count; -} - -/** * i40e_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on @@ -1995,13 +2101,29 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; - int tso; + int tso, count; /* prefetch the data, we'll need it later */ prefetch(skb->data); - if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) @@ -2026,24 +2148,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags)) { - if (skb_linearize(skb)) - goto out_drop; - tx_ring->tx_stats.tx_linearize++; - } + /* Always offload the checksum, since it's in the data descriptor */ + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; - /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index e29bb3e86cfd..c1dd8c5c9666 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
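With the descriptor-count helper inlined (its new home in i40e_txrx.h follows below), the transmit path above now works in three steps: estimate how many data descriptors the skb needs, linearize if i40e_chk_linearize() says a single segment could exceed the 8-buffer limit, and only then reserve count + 4 + 1 ring slots (a 4-descriptor gap to avoid the cache line where the head write-back lands, plus one context descriptor). A rough sketch of the counting step; MAX_DATA_PER_TXD here is a stand-in for the driver's real per-descriptor data limit, not its actual value:

#include <stdio.h>

/* Stand-in for I40E_MAX_DATA_PER_TXD (defined in i40e_txrx.h); the exact
 * limit doesn't change the shape of the calculation.
 */
#define MAX_DATA_PER_TXD	8192
#define TXD_USE_COUNT(s)	(((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

/* One run of descriptors per buffer: the linear head plus each page
 * fragment, where anything bigger than MAX_DATA_PER_TXD spans several
 * descriptors.
 */
static int xmit_descriptor_count(int headlen, const int *frag, int nr_frags)
{
	int i, count = TXD_USE_COUNT(headlen);

	for (i = 0; i < nr_frags; i++)
		count += TXD_USE_COUNT(frag[i]);

	return count;
}

int main(void)
{
	/* illustrative skb: 256-byte linear part plus two 32 KiB fragments */
	int frags[2] = { 32768, 32768 };
	int count = xmit_descriptor_count(256, frags, 2);

	printf("data descriptors: %d, ring slots to reserve: %d\n",
	       count, count + 4 + 1);
	return 0;
}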
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) @@ -202,12 +201,15 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { @@ -253,7 +255,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -273,7 +274,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) /* stats structs */ struct i40e_queue_stats stats; @@ -313,8 +313,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); @@ -324,7 +324,10 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); -u32 i40evf_get_tx_pending(struct i40e_ring *ring); +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); +int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +bool __i40evf_chk_linearize(struct sk_buff *skb); /** * i40e_get_head - Retrieve head from head writeback @@ -339,4 +342,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } + +/** + * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
+ **/ +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + int count = 0, size = skb_headlen(skb); + + for (;;) { + count += TXD_USE_COUNT(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/** + * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40evf_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @count: number of buffers used + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +{ + /* we can only support up to 8 data buffers for a single send */ + if (likely(count <= I40E_MAX_BUFFER_TXD)) + return false; + + return __i40evf_chk_linearize(skb); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index be1b72b93888..e657eccd232c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -173,6 +173,7 @@ enum i40evf_state_t { __I40EVF_RESETTING, /* in reset */ /* Below here, watchdog is running */ __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ __I40EVF_TESTING, /* in ethtool self-test */ __I40EVF_RUNNING, /* opened, working */ }; @@ -273,6 +274,9 @@ struct i40evf_adapter { }; +/* Ethtool Private Flags */ +#define I40EVF_PRIV_FLAGS_PS BIT(0) + /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; extern const char i40evf_driver_version[]; @@ -280,6 +284,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); int i40evf_process_config(struct i40evf_adapter *adapter); +void i40evf_schedule_reset(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index a4c9feb589e7..dd4430aae7fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -63,6 +63,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) +static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { + "packet-split", +}; + +#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) + /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -97,6 +103,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); + else if (sset == ETH_SS_PRIV_FLAGS) + return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -162,6 +170,12 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } + } else if (sset == ETH_SS_PRIV_FLAGS) { + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40evf_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } } } @@ -211,6 +225,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -459,6 +474,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &adapter->hw; + u32 flags = adapter->vf_res->vf_offload_flags; u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); @@ -477,54 +493,50 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, switch (nfc->flow_type) { case TCP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - default: + } else { return -EINVAL; } break; case TCP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - default: + } else { return -EINVAL; } break; case UDP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - default: + } else { return -EINVAL; } break; case UDP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= 
~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: + } else { return -EINVAL; } break; @@ -713,6 +725,54 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, I40EVF_HLUT_ARRAY_SIZE); } +/** + * i40evf_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40evf_get_priv_flags(struct net_device *dev) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + u32 ret_flags = 0; + + ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? + I40EVF_PRIV_FLAGS_PS : 0; + + return ret_flags; +} + +/** + * i40evf_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + bool reset_required = false; + + if ((flags & I40EVF_PRIV_FLAGS_PS) && + !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && + (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } + + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40evf_schedule_reset(adapter); + + return 0; +} + static const struct ethtool_ops i40evf_ethtool_ops = { .get_settings = i40evf_get_settings, .get_drvinfo = i40evf_get_drvinfo, @@ -722,6 +782,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, + .get_priv_flags = i40evf_get_priv_flags, + .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 94da913b151d..4b70aae2fa84 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -32,13 +32,13 @@ static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = - "Intel(R) XL710/X710 Virtual Function Network Driver"; + "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 15 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40evf_wq; + /** * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -171,6 +173,19 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) } /** + * i40evf_schedule_reset - Set the flags and schedule a reset event + * @adapter: board private structure + **/ +void i40evf_schedule_reset(struct i40evf_adapter *adapter) +{ + if (!(adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + +/** * i40evf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ @@ -179,11 +194,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | - I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } + i40evf_schedule_reset(adapter); } /** @@ -636,35 +647,22 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) int rx_buf_len; - adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - - /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) { - if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE) - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - } else { - if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE) - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - } - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - rx_buf_len = I40E_RX_HDR_SIZE; - } else { - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); - } + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || + netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = I40EVF_RXBUFFER_2048; + else + rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + set_ring_ps_enabled(&adapter->rx_rings[i]); + adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; + } else { + clear_ring_ps_enabled(&adapter->rx_rings[i]); + } } } @@ -1001,7 +999,12 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; + if (adapter->flags & 
I40EVF_FLAG_RX_PS_ENABLED) { + i40evf_alloc_rx_headers(ring); + i40evf_alloc_rx_buffers_ps(ring, ring->count); + } else { i40evf_alloc_rx_buffers_1buf(ring, ring->count); + } ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -1032,7 +1035,7 @@ void i40evf_down(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct i40evf_mac_filter *f; - if (adapter->state == __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, @@ -1122,7 +1125,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; kfree(adapter->tx_rings); + adapter->tx_rings = NULL; kfree(adapter->rx_rings); + adapter->rx_rings = NULL; } /** @@ -1454,7 +1459,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) int ret; /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - hena = I40E_DEFAULT_RSS_HENA; + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); @@ -1829,6 +1838,7 @@ static void i40evf_reset_task(struct work_struct *work) break; msleep(I40EVF_RESET_WAIT_MS); } + pci_set_master(adapter->pdev); /* extra wait to make sure minimum wait is met */ msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { @@ -1873,6 +1883,7 @@ static void i40evf_reset_task(struct work_struct *work) adapter->netdev->flags &= ~IFF_UP; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __I40EVF_DOWN; dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } @@ -2142,7 +2153,8 @@ static int i40evf_open(struct net_device *netdev) dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - if (adapter->state != __I40EVF_DOWN || adapter->aq_required) + + if (adapter->state != __I40EVF_DOWN) return -EBUSY; /* allocate transmit descriptors */ @@ -2197,14 +2209,14 @@ static int i40evf_close(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - if (adapter->state <= __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_down(adapter); - adapter->state = __I40EVF_DOWN; + adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); return 0; @@ -2325,9 +2337,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | NETIF_F_GRO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features &= ~NETIF_F_RXCSUM; @@ -2466,11 +2493,20 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } + + if (hw->mac.type == I40E_MAC_X722_VF) + adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE; + if (i40evf_process_config(adapter)) goto err_alloc; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; + adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; + + /* Default to single buffer rx, can be changed through ethtool. 
*/ + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); @@ -2502,10 +2538,9 @@ static void i40evf_init_task(struct work_struct *work) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; - if (!RSS_AQ(adapter)) - i40evf_init_rss(adapter); + err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2885,6 +2920,11 @@ static int __init i40evf_init_module(void) pr_info("%s\n", i40evf_copyright); + i40evf_wq = create_singlethread_workqueue(i40evf_driver_name); + if (!i40evf_wq) { + pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + return -ENOMEM; + } ret = pci_register_driver(&i40evf_driver); return ret; } @@ -2900,6 +2940,7 @@ module_init(i40evf_init_module); static void __exit i40evf_exit_module(void) { pci_unregister_driver(&i40evf_driver); + destroy_workqueue(i40evf_wq); } module_exit(i40evf_exit_module); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index c1c526283757..488e738f76c6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,6 +270,10 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + vqpi->rxq.splithdr_enabled = true; + vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; + } vqpi++; } @@ -804,6 +808,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case I40E_VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); + if (adapter->state == __I40EVF_DOWN_PENDING) + adapter->state = __I40EVF_DOWN; break; case I40E_VIRTCHNL_OP_VERSION: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index adb33e2a0137..a23aa6704394 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -34,6 +34,7 @@ #include "e1000_mac.h" #include "e1000_82575.h" #include "e1000_i210.h" +#include "igb.h" static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *); @@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + /** * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * @hw: pointer to the HW structure @@ -398,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) /* Set mta register count */ mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; /* Set rar entry count */ switch (mac->type) { case e1000_82576: @@ -429,6 +458,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; } + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. */ @@ -1517,10 +1551,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) /* Disabling VLAN filtering */ hw_dbg("Initializing the IEEE VLAN\n"); - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_clear_vfta_i350(hw); - else - igb_clear_vfta(hw); + igb_clear_vfta(hw); /* Setup the receive address */ igb_init_rx_addrs(hw, rar_count); @@ -2889,7 +2920,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { #endif }; -static struct e1000_phy_operations e1000_phy_ops_82575 = { +static const struct e1000_phy_operations e1000_phy_ops_82575 = { .acquire = igb_acquire_phy_82575, .get_cfg_done = igb_get_cfg_done_82575, .release = igb_release_phy_82575, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 2154aea7aa7e..de8805a2a2fe 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -56,10 +56,10 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, #define E1000_SRRCTL_TIMESTAMP 0x40000000 -#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index c3c598c347a9..e9f23ee8f15e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -356,7 +356,8 @@ /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 /* PBA constants */ #define E1000_PBA_34K 0x0022 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4034207eb5cc..2fb2213cd562 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -325,7 +325,7 @@ struct e1000_mac_operations { s32 (*get_thermal_sensor_data)(struct e1000_hw *); s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); #endif - + void (*write_vfta)(struct e1000_hw *, u32, u32); }; struct 
e1000_phy_operations { @@ -372,7 +372,7 @@ struct e1000_thermal_sensor_data { struct e1000_info { s32 (*get_invariants)(struct e1000_hw *); struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; + const struct e1000_phy_operations *phy_ops; struct e1000_nvm_operations *nvm_ops; }; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 2a88595f956c..07cf4fe58338 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw) { u32 offset; - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - array_wr32(E1000_VFTA, offset, 0); - wrfl(); - } + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); } /** @@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { + struct igb_adapter *adapter = hw->back; + array_wr32(E1000_VFTA, offset, value); wrfl(); -} - -/* Due to a hw errata, if the host tries to configure the VFTA register - * while performing queries from the BMC or DMA, then the VFTA in some - * cases won't be written. - */ -/** - * igb_clear_vfta_i350 - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -void igb_clear_vfta_i350(struct e1000_hw *hw) -{ - u32 offset; - int i; - - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, 0); - - wrfl(); - } -} - -/** - * igb_write_vfta_i350 - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table - * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. - **/ -static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) -{ - int i; - - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, value); - - wrfl(); + adapter->shadow_vfta[offset] = value; } /** @@ -183,40 +141,155 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) } /** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** * igb_vfta_set - enable or disable vlan in VLAN filter table * @hw: pointer to the HW structure - * @vid: VLAN id to add or remove - * @add: if true add filter, if false remove + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove * * Sets or clears a bit in the VLAN filter table array based on VLAN id * and if we are adding or removing the filter **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { - u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; - u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - u32 vfta; struct igb_adapter *adapter = hw->back; - s32 ret_val = 0; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - vfta = adapter->shadow_vfta[index]; + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; - /* bit was set/cleared before we started */ - if ((!!(vfta & mask)) == add) { - ret_val = -E1000_ERR_CONFIG; - } else { - if (add) - vfta |= mask; - else - vfta &= ~mask; + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; } - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_write_vfta_i350(hw, index, vfta); - else - igb_write_vfta(hw, index, vfta); - adapter->shadow_vfta[index] = vfta; - return ret_val; + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. 
+ * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index ea24961b0d70..90c8893c3eed 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -56,8 +56,9 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw); -void igb_clear_vfta_i350(struct e1000_hw *hw); -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); void igb_config_collision_dist(struct e1000_hw *hw); void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); void igb_mta_set(struct e1000_hw *hw, u32 hash_value); diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 162cc49345d0..10f5c9e016a9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; + int count = 10; - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - /* reserve mailbox for vf use */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = 0; + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index e3cb93bdb21a..9413fa61392f 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -95,7 +95,6 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 vlans_enabled; u32 flags; unsigned long last_nack; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ @@ -482,6 +481,7 @@ struct igb_adapter { #define IGB_FLAG_MAS_ENABLE (1 << 12) #define IGB_FLAG_HAS_MSIX (1 << 13) #define IGB_FLAG_EEE (1 << 14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 @@ -510,6 +510,8 @@ enum igb_boards { extern char igb_driver_name[]; extern char igb_driver_version[]; +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); int igb_up(struct igb_adapter *); void igb_down(struct igb_adapter *); void igb_reinit_locked(struct igb_adapter *); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 1d329f1d047b..7982243d1f9b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2017,7 +2017,7 @@ static void igb_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + igb_close(netdev); else igb_reset(adapter); @@ -2050,7 +2050,7 @@ static void igb_diag_test(struct net_device *netdev, clear_bit(__IGB_TESTING, &adapter->state); if (if_running) - dev_open(netdev); + igb_open(netdev); } else { dev_info(&adapter->pdev->dev, "online testing starting\n"); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 31e5f3942839..55a1405cb2a1 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -122,8 +122,8 @@ static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void igb_remove(struct pci_dev *pdev); static int igb_sw_init(struct igb_adapter *); -static int igb_open(struct net_device *); -static int igb_close(struct net_device *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); static void igb_configure(struct igb_adapter *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); @@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); -static void igb_set_uta(struct igb_adapter *adapter); +static void igb_set_uta(struct igb_adapter *adapter, bool set); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); @@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter) static void igb_update_mng_vlan(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(hw, vid, pf_id, true, true); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; @@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) (vid != old_vid) && !test_bit(old_vid, adapter->active_vlans)) { /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, false); + igb_vfta_set(hw, vid, pf_id, false, true); } } @@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= 
~IGB_FLAG_VLAN_PROMISC; + igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); #ifdef CONFIG_IGB_DCA @@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; + u32 pba, hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. @@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter) break; } - if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && - (mac->type < e1000_82576)) { - /* adjust PBA for jumbo frames */ + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ wr32(E1000_PBA, pba); /* To maintain wire speed transmits, the Tx FIFO should be @@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter) * one full receive packet and is similarly rounded up and * expressed in KB. */ - pba = rd32(E1000_PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; - /* the Tx fifo also stores 16 bytes of information about the Tx - * but don't include ethernet FCS because hardware appends it + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. */ - min_tx_space = (adapter->max_frame_size + - sizeof(union e1000_adv_tx_desc) - - ETH_FCS_LEN) * 2; - min_tx_space = ALIGN(min_tx_space, 1024); - min_tx_space >>= 10; - /* software strips receive CRC, so leave room for it */ - min_rx_space = adapter->max_frame_size; - min_rx_space = ALIGN(min_rx_space, 1024); - min_rx_space >>= 10; + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation + * allocation, take space away from current Rx allocation. */ - if (tx_space < min_tx_space && - ((min_tx_space - tx_space) < pba)) { - pba = pba - (min_tx_space - tx_space); + if (needed_tx_space < pba) { + pba -= needed_tx_space; /* if short on Rx space, Rx wins and must trump Tx * adjustment @@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter) if (pba < min_rx_space) pba = min_rx_space; } + + /* adjust PBA for jumbo frames */ wr32(E1000_PBA, pba); } - /* flow control settings */ - /* The high water mark must be low enough to fit one full frame - * (or the size used for early receive) above it in the Rx FIFO. - * Set it to the lower of: - * - 90% of the Rx FIFO size, or - * - the full Rx FIFO size minus one full frame + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame */ - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - 2 * adapter->max_frame_size)); + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; @@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev, if (changed & NETIF_F_HW_VLAN_CTAG_RX) igb_vlan_mode(netdev, features); - if (!(changed & NETIF_F_RXALL)) + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; netdev->features = features; @@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev, return 0; } +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + int vfn = adapter->vfs_allocated_count; + int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + + if (netdev_uc_count(dev) >= rar_entries) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = { #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = passthru_features_check, }; @@ -2349,27 +2372,35 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * assignment. */ netdev->features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features |= NETIF_F_RXALL; + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + /* set this bit last since it cannot be part of hw_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->vlan_features |= NETIF_F_TSO | + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_SG; + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -2378,11 +2409,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features |= NETIF_F_HIGHDMA; } - if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CRC; - netdev->features |= NETIF_F_SCTP_CRC; - } - netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); @@ -2515,6 +2541,26 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->wol = 0; } + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == 
e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGB_FLAG_WOL_SUPPORTED); @@ -2921,14 +2967,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, /* Device supports enough interrupts without queue pairing. */ break; case e1000_82576: - /* If VFs are going to be allocated with RSS queues then we - * should pair the queues in order to conserve interrupts due - * to limited supply. - */ - if ((adapter->rss_queues > 1) && - (adapter->vfs_allocated_count > 6)) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - /* fall through */ case e1000_82580: case e1000_i350: case e1000_i354: @@ -2939,6 +2977,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, */ if (adapter->rss_queues > (max_rss_queues / 2)) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; break; } } @@ -3132,7 +3172,7 @@ err_setup_tx: return err; } -static int igb_open(struct net_device *netdev) +int igb_open(struct net_device *netdev) { return __igb_open(netdev, false); } @@ -3169,7 +3209,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) return 0; } -static int igb_close(struct net_device *netdev) +int igb_close(struct net_device *netdev) { return __igb_close(netdev, false); } @@ -3460,12 +3500,12 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) wr32(E1000_VT_CTL, vtctl); } if (adapter->rss_queues > 1) - mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; else mrqc |= E1000_MRQC_ENABLE_VMDQ; } else { if (hw->mac.type != e1000_i211) - mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; } igb_vmm_control(adapter); @@ -3498,7 +3538,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) /* disable store bad packets and clear size bits. */ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - /* enable LPE to prevent packets larger than max_frame_size */ + /* enable LPE to allow for reception of jumbo frames */ rctl |= E1000_RCTL_LPE; /* disable queue 0 to prevent tail write w/o re-config */ @@ -3522,8 +3562,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. @@ -3539,12 +3578,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, struct e1000_hw *hw = &adapter->hw; u32 vmolr; - /* if it isn't the PF check to see if VFs are enabled and - * increase the size to support vlan tags - */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; vmolr = rd32(E1000_VMOLR(vfn)); vmolr &= ~E1000_VMOLR_RLPML_MASK; @@ -3554,30 +3589,26 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, return 0; } -/** - * igb_rlpml_set - set maximum receive packet size - * @adapter: board private structure - * - * Configure maximum receivable packet size. 
- **/ -static void igb_rlpml_set(struct igb_adapter *adapter) +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) { - u32 max_frame_size = adapter->max_frame_size; struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; + u32 val, reg; - if (pf_id) { - igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - /* If we're in VMDQ or SR-IOV mode, then set global RLPML - * to our max jumbo frame size, in case we need to enable - * jumbo frames on one of the rings later. - * This will not pass over-length frames into the default - * queue because it's gated by the VMOLR.RLPML. - */ - max_frame_size = MAX_JUMBO_FRAME_SIZE; - } + if (hw->mac.type < e1000_82576) + return; - wr32(E1000_RLPML, max_frame_size); + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); } static inline void igb_set_vmolr(struct igb_adapter *adapter, @@ -3593,14 +3624,6 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, return; vmolr = rd32(E1000_VMOLR(vfn)); - vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ - if (hw->mac.type == e1000_i350) { - u32 dvmolr; - - dvmolr = rd32(E1000_DVMOLR(vfn)); - dvmolr |= E1000_DVMOLR_STRVLAN; - wr32(E1000_DVMOLR(vfn), dvmolr); - } if (aupe) vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ else @@ -3684,9 +3707,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) { int i; - /* set UTA to appropriate mode */ - igb_set_uta(adapter); - /* set the correct pool for the PF default MAC address in entry 0 */ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, adapter->vfs_allocated_count); @@ -4004,6 +4024,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev) return count; } +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + /* fall through */ + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + /* fall through */ + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= 1 << pf_id; + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= 1 << (vid 
% 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~(1 << pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4018,21 +4162,17 @@ static void igb_set_rx_mode(struct net_device *netdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; - u32 rctl, vmolr = 0; + u32 rctl = 0, vmolr = 0; int count; /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); - - /* clear the effected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); - if (netdev->flags & IFF_PROMISC) { - /* retain VLAN HW filtering if in VT mode */ - if (adapter->vfs_allocated_count) - rctl |= E1000_RCTL_VFE; - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; } else { if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; @@ -4050,17 +4190,34 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= E1000_VMOLR_ROMPE; } } - /* Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = igb_write_uc_addr_list(netdev); - if (count < 0) { - rctl |= E1000_RCTL_UPE; - vmolr |= E1000_VMOLR_ROPE; - } - rctl |= E1000_RCTL_VFE; } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= 
~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); wr32(E1000_RCTL, rctl); /* In order to support SR-IOV and eventually VMDq it is necessary to set @@ -4071,9 +4228,19 @@ static void igb_set_rx_mode(struct net_device *netdev) if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) return; + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + vmolr |= rd32(E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, no need for restriction */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE); + igb_restore_vf_multicasts(adapter); } @@ -4227,6 +4394,7 @@ static void igb_watchdog_task(struct work_struct *work) u32 link; int i; u32 connsw; + u16 phy_data, retry_count = 20; link = igb_has_link(adapter); @@ -4305,6 +4473,25 @@ static void igb_watchdog_task(struct work_struct *work) break; } + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: netif_carrier_on(netdev); igb_ping_all_vfs(adapter); @@ -4713,70 +4900,57 @@ static int igb_tso(struct igb_ring *tx_ring, return 1; } +static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) return; - } else { - u8 l4_hdr = 0; - - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } - break; - } + goto no_csum; + } - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); - } + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, 
checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + igb_ipv6_csum_is_sctp(skb))) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } - - /* update TX checksum flag */ - first->tx_flags |= IGB_TX_FLAGS_CSUM; + default: + skb_checksum_help(skb); + goto csum_failed; } + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ @@ -5088,16 +5262,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, { struct igb_adapter *adapter = netdev_priv(netdev); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ @@ -5792,125 +5956,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; - u32 pool_mask, reg, vid; - int i; + u32 pool_mask, vlvf_mask, i; - pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + /* create mask for VF and other pools */ + pool_mask = E1000_VLVF_POOLSEL_MASK; + vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count)); /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; /* remove the vf from the pool */ - reg &= ~pool_mask; - - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK) && - (reg & E1000_VLVF_VLANID_ENABLE)) { - reg = 0; - vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, false); - } + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; - wr32(E1000_VLVF(i), reg); + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = 1 << (vid % 32); + + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); } +} - adapter->vf_data[vf].vlans_enabled = 0; +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return 
idx; } -static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) +void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) { struct e1000_hw *hw = &adapter->hw; - u32 reg, i; - - /* The vlvf table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return -1; + u32 bits, pf_id; + int idx; - /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) - return -1; + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), 1 << pf_id); + else + wr32(E1000_VLVF(idx), 0); } +} - if (add) { - if (i == E1000_VLVF_ARRAY_SIZE) { - /* Did not find a matching VLAN ID entry that was - * enabled. Search for a free filter entry, i.e. - * one without the enable bit set - */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if (!(reg & E1000_VLVF_VLANID_ENABLE)) - break; - } - } - if (i < E1000_VLVF_ARRAY_SIZE) { - /* Found an enabled/available entry */ - reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* if !enabled we need to set this up in vfta */ - if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table */ - igb_vfta_set(hw, vid, true); - reg |= E1000_VLVF_VLANID_ENABLE; - } - reg &= ~E1000_VLVF_VLANID_MASK; - reg |= vid; - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size += 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; - adapter->vf_data[vf].vlans_enabled++; - } - } else { - if (i < E1000_VLVF_ARRAY_SIZE) { - /* remove vf from the pool */ - reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK)) { - reg = 0; - igb_vfta_set(hw, vid, false); - } - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - adapter->vf_data[vf].vlans_enabled--; - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size -= 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } - } + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. 
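 *
 * (Illustrative aside, not part of the patch: the shared VFTA is a
 * 128-entry array of 32-bit words, so VLAN ID v lands in word v / 32,
 * bit v % 32; VID 100, for example, maps to word 3, bit 4. Each VLVF
 * entry additionally carries a pool-selection bitmap in which bit
 * (E1000_VLVF_POOLSEL_SHIFT + n) marks pool n, the PF's pool included,
 * as a member of that VLAN.)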
+ */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; } - return 0; + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. + */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; } static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) @@ -5923,130 +6094,104 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) wr32(E1000_VMVIR(vf), 0); } -static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos) +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) { - int err = 0; - struct igb_adapter *adapter = netdev_priv(netdev); + int err; - if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (vlan || qos) { - err = igb_vlvf_set(adapter, vlan, !!vlan, vf); - if (err) - goto out; - igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); - igb_set_vmolr(adapter, vf, !vlan); - adapter->vf_data[vf].pf_vlan = vlan; - adapter->vf_data[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); - } - } else { - igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, - false, vf); - igb_set_vmvir(adapter, vlan, vf); - igb_set_vmolr(adapter, vf, true); - adapter->vf_data[vf].pf_vlan = 0; - adapter->vf_data[vf].pf_qos = 0; + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); } -out: + return err; } -static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) { - struct e1000_hw *hw = &adapter->hw; - int i; - u32 reg; + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; - } + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); + + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); - if (i >= E1000_VLVF_ARRAY_SIZE) - i = -1; + adapter->vf_data[vf].pf_vlan = 0; + 
adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); - return i; + return 0; } -static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) { - struct e1000_hw *hw = &adapter->hw; - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); - /* If in promiscuous mode we need to make sure the PF also has - * the VLAN filter set. - */ - if (add && (adapter->netdev->flags & IFF_PROMISC)) - err = igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); - if (err) - goto out; + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; - err = igb_vlvf_set(adapter, vid, add, vf); + return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} - if (err) - goto out; +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && (adapter->netdev->flags & IFF_PROMISC)) { - u32 vlvf, bits; - int regndx = igb_find_vlvf_entry(adapter, vid); - - if (regndx < 0) - goto out; - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - vlvf = bits = rd32(E1000_VLVF(regndx)); - bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. 
- */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && - !bits) - igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); - } + if (adapter->vf_data[vf].pf_vlan) + return -1; -out: - return err; + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; } static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) { - /* clear flags - except flag that indicates PF has set the MAC */ - adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; - adapter->vf_data[vf].last_nack = jiffies; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - /* reset offloads to defaults */ - igb_set_vmolr(adapter, vf, true); + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); - if (adapter->vf_data[vf].pf_vlan) - igb_ndo_set_vf_vlan(adapter->netdev, vf, - adapter->vf_data[vf].pf_vlan, - adapter->vf_data[vf].pf_qos); - else - igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; @@ -6191,7 +6336,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", vf); else - retval = igb_set_vf_vlan(adapter, msgbuf, vf); + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); break; default: dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); @@ -6233,6 +6378,7 @@ static void igb_msg_task(struct igb_adapter *adapter) /** * igb_set_uta - Set unicast filter table address * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits * * The unicast table address is a register array of 32-bit registers. * The table is meant to be used in a way similar to how the MTA is used @@ -6240,21 +6386,18 @@ static void igb_msg_task(struct igb_adapter *adapter) * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous * enable bit to allow vlan tag stripping when promiscuous mode is enabled **/ -static void igb_set_uta(struct igb_adapter *adapter) +static void igb_set_uta(struct igb_adapter *adapter, bool set) { struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? ~0 : 0; int i; - /* The UTA table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return; - /* we only need to do this if VMDq is enabled */ if (!adapter->vfs_allocated_count) return; - for (i = 0; i < hw->mac.uta_reg_count; i++) - array_wr32(E1000_UTA, i, ~0); + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); } /** @@ -6630,7 +6773,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, /* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users. 
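 *
 * (Editorial aside: page_ref_inc(), declared in <linux/page_ref.h>, is
 * a thin wrapper around the same reference-count increment, so the
 * substitution below is purely mechanical; it simply keeps drivers
 * from poking at the struct page refcount field directly.)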
*/ - atomic_inc(&page->_count); + page_ref_inc(page); return true; } @@ -7202,7 +7345,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) wr32(E1000_CTRL, ctrl); } - igb_rlpml_set(adapter); + igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); } static int igb_vlan_rx_add_vid(struct net_device *netdev, @@ -7212,11 +7355,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - /* attempt to add filter to vlvf array */ - igb_vlvf_set(adapter, vid, true, pf_id); - /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, true); + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); set_bit(vid, adapter->active_vlans); @@ -7227,16 +7368,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - s32 err; - - /* remove vlan from VLVF table array */ - err = igb_vlvf_set(adapter, vid, false, pf_id); + struct e1000_hw *hw = &adapter->hw; - /* if vid was not present in VLVF just remove it from table */ - if (err) - igb_vfta_set(hw, vid, false); + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); clear_bit(vid, adapter->active_vlans); @@ -7245,11 +7382,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, static void igb_restore_vlan(struct igb_adapter *adapter) { - u16 vid; + u16 vid = 1; igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } @@ -7704,15 +7842,14 @@ static void igb_io_resume(struct pci_dev *pdev) static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, u8 qsel) { - u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian + * from network order (big endian) to CPU endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_low = le32_to_cpup((__be32 *)(addr)); + rar_high = le16_to_cpup((__be16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. */ rar_high |= E1000_RAH_AV; @@ -7959,9 +8096,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) * than the Rx threshold. Set hwm to PBA - max frame * size in 16B units, capping it at PBA - 6KB. */ - hwm = 64 * pba - adapter->max_frame_size / 16; - if (hwm < 64 * (pba - 6)) - hwm = 64 * (pba - 6); + hwm = 64 * (pba - 6); reg = rd32(E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) @@ -7971,9 +8106,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Set the DMA Coalescing Rx threshold to PBA - 2 * max * frame size, capping it at PBA - 10KB. 
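 *
 * (Editorial aside, worked example: pba is expressed in KB, hwm is
 * programmed in 16-byte units (hence the factor of 64) and dmac_thr
 * in KB, so with a 34 KB packet buffer the simplified code writes
 * hwm = 64 * (34 - 6) = 1792, i.e. 28 KB, and dmac_thr = 34 - 10 = 24;
 * both values are now pinned at what used to be their caps.)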
*/ - dmac_thr = pba - adapter->max_frame_size / 512; - if (dmac_thr < pba - 10) - dmac_thr = pba - 10; + dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c44df87c38de..22a8a29895b4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -525,7 +525,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.period.nsec; ns = timespec64_to_ns(&ts); ns = ns >> 1; - if (on && ns <= 70000000LL) { + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { if (ns < 8LL) return -EINVAL; use_freq = 1; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 7b6cb4c3764c..01752f44ace2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; - - /* Take ownership of the buffer */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - - /* reserve mailbox for VF use */ - if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) - ret_val = E1000_SUCCESS; + int count = 10; + + do { + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for VF use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 297af801f051..c12442252adb 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -43,6 +43,7 @@ #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> +#include <linux/sctp.h> #include "igbvf.h" @@ -876,7 +877,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data) adapter->int_counter1++; - netif_carrier_off(netdev); hw->mac.get_link_status = 1; if (!test_bit(__IGBVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -1908,6 +1908,31 @@ static void igbvf_watchdog_task(struct work_struct *work) #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + struct igbvf_buffer *buffer_info; + u16 i = tx_ring->next_to_use; + + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + buffer_info->time_stamp = jiffies; + buffer_info->dma = 0; +} + static int igbvf_tso(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, @@ -1987,65 +2012,56 @@ static int igbvf_tso(struct igbvf_adapter *adapter, return true; } -static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - __be16 protocol) +static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) { - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - - if ((skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IGBVF_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + unsigned int offset = 0; - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - info |= (skb_transport_header(skb) - - skb_network_header(skb)); + return offset == skb_checksum_start_offset(skb); +} - context_desc->vlan_macip_lens = cpu_to_le32(info); +static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol) +{ + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(tx_flags & IGBVF_TX_FLAGS_VLAN)) + return false; + goto no_csum; + } - if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (protocol) { - case htons(ETH_P_IP): - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - break; - case htons(ETH_P_IPV6): - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - break; - default: - break; - } + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((protocol == htons(ETH_P_IPV6)) && + igbvf_ipv6_csum_is_sctp(skb))) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; } - - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); - context_desc->seqnum_seed = 0; - context_desc->mss_l4len_idx = 0; - - buffer_info->time_stamp = jiffies; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return true; + default: + skb_checksum_help(skb); + goto csum_failed; } - return false; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; + + igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + return true; } static int 
igbvf_maybe_stop_tx(struct net_device *netdev, int size) @@ -2264,7 +2280,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; - else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && + else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; @@ -2717,11 +2733,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | @@ -2731,11 +2747,14 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 0f1eca639f68..f00a41d9a1ca 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -126,6 +126,7 @@ struct e1000_adv_tx_context_desc { #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4b9156cd8b93..84fa28ceb200 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -796,6 +796,10 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ +#define IXGBE_MAX_LINK_HANDLE 10 + struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + unsigned long tables; + /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode */ @@ -925,6 +929,9 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx); void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bea96b3bc90c..726e0eeee63b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2520,9 +2520,9 @@ static int ixgbe_get_rxnfc(struct 
net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4003a88bbf6..569cb0757c93 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -51,6 +51,8 @@ #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> #include <net/vxlan.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> #ifdef CONFIG_OF #include <linux/of_net.h> @@ -65,6 +67,7 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" +#include "ixgbe_model.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -1089,7 +1092,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) * @tx_ring: tx ring to clean **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring, int napi_budget) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; @@ -1127,7 +1130,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1942,7 +1945,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, /* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users. */ - atomic_inc(&page->_count); + page_ref_inc(page); return true; } @@ -2784,7 +2787,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) @@ -5545,6 +5548,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* CONFIG_IXGBE_DCB */ #endif /* IXGBE_FCOE */ + /* initialize static ixgbe jump table entries */ + adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); @@ -8200,6 +8206,225 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); + spin_unlock(&adapter->fdir_perfect_lock); + return err; +} + +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /* This ixgbe devices do not support hash tables at the moment + * so abort when given hash tables. 
+ */ + if (cls->hnode.divisor > 0) + return -EINVAL; + + set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + u32 loc = cls->knode.handle & 0xfffff; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_mat_field *field_ptr; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; +#ifdef CONFIG_NET_CLS_ACT + const struct tc_action *a; +#endif + int i, err = 0; + u8 queue; + u32 handle; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + handle = cls->knode.handle; + + /* At the moment cls_u32 jumps to transport layer and skips past + * L2 headers. The canonical method to match L2 frames is to use + * negative values. However this is error prone at best but really + * just broken because there is no way to "know" what sort of hdr + * is in front of the transport layer. Fix cls_u32 to support L2 + * headers when needed. + */ + if (protocol != htons(ETH_P_IP)) + return -EINVAL; + + if (cls->knode.link_handle || + cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle); + + if (!test_bit(uhtid, &adapter->tables)) + return -EINVAL; + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr->o != cls->knode.sel->offoff || + nexthdr->s != cls->knode.sel->offshift || + nexthdr->m != cls->knode.sel->offmask || + /* do not support multiple key jumps its just mad */ + cls->knode.sel->nkeys > 1) + return -EINVAL; + + if (nexthdr->off != cls->knode.sel->keys[0].off || + nexthdr->val != cls->knode.sel->keys[0].val || + nexthdr->mask != cls->knode.sel->keys[0].mask) + return -EINVAL; + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + adapter->jump_tables[uhtid] = nexthdr->jump; + } + return 0; + } + + if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + /* cls u32 is a graph starting at root node 0x800. The driver tracks + * links and also the fields used to advance the parser across each + * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map + * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h + * To add support for new nodes update ixgbe_model.h parse structures + * this function _should_ be generic try not to hardcode values here. 
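 *
 * (Illustrative aside, not part of the patch: a minimal filter that
 * exercises this path, assuming eth0 is an ixgbe port with the
 * hw-tc-offload feature enabled, would be
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip u32 \
 *           match ip dst 192.168.1.1/32 action drop
 *
 * which lands in the default 800: root table, matches the IPv4
 * destination address through ixgbe_ipv4_fields (offset 16) and maps
 * the gact drop action to IXGBE_FDIR_DROP_QUEUE.)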
+ */ + if (TC_U32_USERHTID(handle) == 0x800) { + field_ptr = adapter->jump_tables[0]; + } else { + if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) + return -EINVAL; + + field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; + } + + if (!field_ptr) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + int off = cls->knode.sel->keys[i].off; + __be32 val = cls->knode.sel->keys[i].val; + __be32 m = cls->knode.sel->keys[i].mask; + bool found_entry = false; + int j; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off && + field_ptr[j].mask == m) { + field_ptr[j].val(input, &mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + + if (!found_entry) + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + +#ifdef CONFIG_NET_CLS_ACT + if (list_empty(&cls->knode.exts->actions)) + goto err_out; + + list_for_each_entry(a, &cls->knode.exts->actions, list) { + if (!is_tcf_gact_shot(a)) + goto err_out; + } +#endif + + input->action = IXGBE_FDIR_DROP_QUEUE; + queue = IXGBE_FDIR_DROP_QUEUE; + input->sw_idx = loc; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) + goto err_out_w_lock; + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + err = -EINVAL; + goto err_out_w_lock; + } + + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue); + if (!err) + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + tc->type == TC_SETUP_CLSU32) { + switch (tc->cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, + proto, tc->cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, tc->cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + tc->cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, + tc->cls_u32); + default: + return -EINVAL; + } + } + + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return ixgbe_setup_tc(dev, tc->tc); +} + #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) { @@ -8262,19 +8487,17 @@ static int ixgbe_set_features(struct net_device *netdev, } /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. + * Check if Flow Director n-tuple support or hw_tc support was + * enabled or disabled. If the state changed, we need to reset. 
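 *
 * (Editorial aside: from user space these feature bits are toggled
 * with "ethtool -K ethX ntuple on|off" and
 * "ethtool -K ethX hw-tc-offload on|off"; enabling either one moves
 * the device from ATR hashing to perfect filters, which is why a
 * reset is needed whenever the state changes.)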
*/ - switch (features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { /* turn off ATR, enable perfect filters and reset */ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: + } else { /* turn off perfect filters, enable ATR and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) need_reset = true; @@ -8282,23 +8505,16 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; /* We cannot enable ATR if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - break; - - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - break; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || + /* We cannot enable ATR if we have 2 or more tcs */ + (netdev_get_num_tc(netdev) > 1) || + /* We cannot enable ATR if RSS is disabled */ + (adapter->ring_feature[RING_F_RSS].limit <= 1) || + /* A sample rate of 0 indicates ATR disabled */ + (!adapter->atr_sample_rate)) + ; /* do nothing not supported */ + else /* otherwise supported and set the flag */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } if (features & NETIF_F_HW_VLAN_CTAG_RX) @@ -8657,9 +8873,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, -#ifdef CONFIG_IXGBE_DCB - .ndo_setup_tc = ixgbe_setup_tc, -#endif + .ndo_setup_tc = __ixgbe_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -9030,7 +9244,8 @@ skip_sriov: case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE; + NETIF_F_NTUPLE | + NETIF_F_HW_TC; break; default: break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h new file mode 100644 index 000000000000..ce48872d4782 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -0,0 +1,112 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux drive + * Copyright(c) 2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _IXGBE_MODEL_H_ +#define _IXGBE_MODEL_H_ + +#include "ixgbe.h" +#include "ixgbe_type.h" + +struct ixgbe_mat_field { + unsigned int off; + unsigned int mask; + int (*val)(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m); + unsigned int type; +}; + +static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_ip[0] = val; + mask->formatted.src_ip[0] = m; + return 0; +} + +static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.dst_ip[0] = val; + mask->formatted.dst_ip[0] = m; + return 0; +} + +static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { + { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .val = NULL } /* terminal node */ +}; + +static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_port = val & 0xffff; + mask->formatted.src_port = m & 0xffff; + return 0; +}; + +static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.dst_port = val & 0xffff; + mask->formatted.dst_port = m & 0xffff; + return 0; +}; + +static struct ixgbe_mat_field ixgbe_tcp_fields[] = { + {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + { .val = NULL } /* terminal node */ +}; + +struct ixgbe_nexthdr { + /* offset, shift, and mask of position to next header */ + unsigned int o; + u32 s; + u32 m; + /* match criteria to make this jump*/ + unsigned int off; + u32 val; + u32 mask; + /* location of jump to make */ + struct ixgbe_mat_field *jump; +}; + +static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .jump = NULL } /* terminal node */ +}; +#endif /* _IXGBE_MODEL_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3558f019b631..0ea14c0a2e74 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -837,7 +837,7 @@ add_tail_frag: /* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users. 
*/ - atomic_inc(&page->_count); + page_ref_inc(page); return true; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b1de7afd4116..3ddf657bc10b 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme) } static inline void -jme_clear_pm(struct jme_adapter *jme) +jme_clear_pm_enable_wol(struct jme_adapter *jme) { jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); } +static inline void +jme_clear_pm_disable_wol(struct jme_adapter *jme) +{ + jwrite32(jme, JME_PMCS, PMCS_STMASK); +} + static int jme_reload_eeprom(struct jme_adapter *jme) { @@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev) struct jme_adapter *jme = netdev_priv(netdev); int rc; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); JME_NAPI_ENABLE(jme); tasklet_init(&jme->linkch_task, jme_link_change_tasklet, @@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme) static void jme_powersave_phy(struct jme_adapter *jme) { - if (jme->reg_pmcs) { + if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { jme_set_100m_half(jme); if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) jme_wait_link(jme); - jme_clear_pm(jme); + jme_clear_pm_enable_wol(jme); } else { jme_phy_off(jme); } @@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev, if (wol->wolopts & WAKE_MAGIC) jme->reg_pmcs |= PMCS_MFEN; - jwrite32(jme, JME_PMCS, jme->reg_pmcs); - device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); - return 0; } @@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_read = jme_mdio_read; jme->mii_if.mdio_write = jme_mdio_write; - jme_clear_pm(jme); - device_set_wakeup_enable(&pdev->dev, true); + jme_clear_pm_disable_wol(jme); + device_init_wakeup(&pdev->dev, true); jme_set_phyfifo_5level(jme); jme->pcirev = pdev->revision; @@ -3304,7 +3307,7 @@ jme_resume(struct device *dev) if (!netif_running(netdev)) return 0; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) jme_set_settings(netdev, &jme->old_ecmd); @@ -3312,13 +3315,14 @@ jme_resume(struct device *dev) jme_reset_phy_processor(jme); jme_phy_calibration(jme); jme_phy_setEA(jme); - jme_start_irq(jme); netif_device_attach(netdev); atomic_inc(&jme->link_changing); jme_reset_link(jme); + jme_start_irq(jme); + return 0; } diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index a1c862b4664d..b5c6d42daa12 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -40,6 +40,19 @@ config MVMDIO This driver is used by the MV643XX_ETH and MVNETA drivers. +config MVNETA_BM_ENABLE + tristate "Marvell Armada 38x/XP network interface BM support" + depends on MVNETA + ---help--- + This driver supports auxiliary block of the network + interface units in the Marvell ARMADA XP and ARMADA 38x SoC + family, which is called buffer manager. + + This driver, when enabled, strictly cooperates with mvneta + driver and is common for all network ports of the devices, + even for Armada 370 SoC, which doesn't support hardware + buffer management. + config MVNETA tristate "Marvell Armada 370/38x/XP network interface support" depends on PLAT_ORION @@ -53,6 +66,15 @@ config MVNETA driver, which should be used for the older Marvell SoCs (Dove, Orion, Discovery, Kirkwood). 
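# (Editorial aside, illustrative only: with CONFIG_MVNETA=y and
# CONFIG_MVNETA_BM_ENABLE=y the hidden MVNETA_BM symbol below resolves
# to y, while a fully modular build (both =m) leaves it at m; the
# indirection exists so that a built-in mvneta never ends up depending
# on a modular buffer-manager helper.)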
+config MVNETA_BM + tristate + default y if MVNETA=y && MVNETA_BM_ENABLE + default MVNETA_BM_ENABLE + select HWBM + help + MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures + that all dependencies are met. + config MVPP2 tristate "Marvell Armada 375 network interface support" depends on MACH_ARMADA_375 diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index f6425bd2884b..ff1bffa74803 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o +obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o obj-$(CONFIG_MVNETA) += mvneta.o obj-$(CONFIG_MVPP2) += mvpp2.o obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 662c2ee268c7..577f7ca7deba 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -30,6 +30,8 @@ #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/skbuff.h> +#include <net/hwbm.h> +#include "mvneta_bm.h" #include <net/ip.h> #include <net/ipv6.h> #include <net/tso.h> @@ -37,6 +39,10 @@ /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) +#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 +#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 +#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 +#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) @@ -50,6 +56,9 @@ #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 +#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) +#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 +#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 #define MVNETA_PORT_RX_RESET 0x1cc0 #define MVNETA_PORT_RX_DMA_RESET BIT(0) #define MVNETA_PHY_ADDR 0x2000 @@ -107,6 +116,7 @@ #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) #define MVNETA_ACC_MODE 0x2500 +#define MVNETA_BM_ADDRESS 0x2504 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 @@ -253,7 +263,10 @@ #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 #define MVNETA_TX_CSUM_DEF_SIZE 1600 #define MVNETA_TX_CSUM_MAX_SIZE 9800 -#define MVNETA_ACC_MODE_EXT 1 +#define MVNETA_ACC_MODE_EXT1 1 +#define MVNETA_ACC_MODE_EXT2 2 + +#define MVNETA_MAX_DECODE_WIN 6 /* Timeout constants */ #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 @@ -293,7 +306,8 @@ ((addr >= txq->tso_hdrs_phys) && \ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) -#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVNETA_RX_GET_BM_POOL_ID(rxd) \ + (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) struct mvneta_statistic { unsigned short offset; @@ -359,6 +373,7 @@ struct mvneta_pcpu_port { }; struct mvneta_port { + u8 id; struct mvneta_pcpu_port __percpu *ports; struct mvneta_pcpu_stats __percpu *stats; @@ -370,6 +385,11 @@ struct mvneta_port { struct net_device *dev; struct notifier_block cpu_notifier; int rxq_def; + /* Protect the access to the percpu interrupt registers, + * ensuring that the configuration remains coherent. 
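 *
 * (Editorial aside: judging by the per-CPU mask/unmask helpers added
 * further down, this lock pairs with is_stopped so that interrupt
 * unmasking triggered from a CPU hotplug callback cannot race with
 * the port being stopped and re-arm interrupts on a dead port.)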
+ */ + spinlock_t lock; + bool is_stopped; /* Core clock */ struct clk *clk; @@ -389,6 +409,11 @@ struct mvneta_port { unsigned int tx_csum_limit; unsigned int use_inband_status:1; + struct mvneta_bm *bm_priv; + struct mvneta_bm_pool *pool_long; + struct mvneta_bm_pool *pool_short; + int bm_win_id; + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -414,6 +439,8 @@ struct mvneta_port { #define MVNETA_TX_L4_CSUM_NOT BIT(31) #define MVNETA_RXD_ERR_CRC 0x0 +#define MVNETA_RXD_BM_POOL_SHIFT 13 +#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) #define MVNETA_RXD_ERR_LEN BIT(18) @@ -558,6 +585,9 @@ static int rxq_def; static int rx_copybreak __read_mostly = 256; +/* HW BM need that each port be identify by a unique ID */ +static int global_port_id; + #define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@ -824,6 +854,215 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } +/* Enable buffer management (BM) */ +static void mvneta_rxq_bm_enable(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val |= MVNETA_RXQ_HW_BUF_ALLOC; + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for bigger packets */ +static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; + val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for smaller packets */ +static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; + val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Set port's receive buffer size for assigned BM pool */ +static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, + int buf_size, + u8 pool_id) +{ + u32 val; + + if (!IS_ALIGNED(buf_size, 8)) { + dev_warn(pp->dev->dev.parent, + "illegal buf_size value %d, round to %d\n", + buf_size, ALIGN(buf_size, 8)); + buf_size = ALIGN(buf_size, 8); + } + + val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); + val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; + mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); +} + +/* Configure MBUS window in order to enable access BM internal SRAM */ +static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, + u8 target, u8 attr) +{ + u32 win_enable, win_protect; + int i; + + win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); + + if (pp->bm_win_id < 0) { + /* Find first not occupied window */ + for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { + if (win_enable & (1 << i)) { + pp->bm_win_id = i; + break; + } + } + if (i == MVNETA_MAX_DECODE_WIN) + return -ENOMEM; + } else { + i = pp->bm_win_id; + } + + mvreg_write(pp, MVNETA_WIN_BASE(i), 0); + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); + + if (i < 4) + mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); + + mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | + (attr << 8) | target); + + mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize 
- 1) & 0xffff0000); + + win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); + win_protect |= 3 << (2 * i); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); + + win_enable &= ~(1 << i); + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + + return 0; +} + +/* Assign and initialize pools for port. In case of fail + * buffer manager will remain disabled for current port. + */ +static int mvneta_bm_port_init(struct platform_device *pdev, + struct mvneta_port *pp) +{ + struct device_node *dn = pdev->dev.of_node; + u32 long_pool_id, short_pool_id, wsize; + u8 target, attr; + int err; + + /* Get BM window information */ + err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, + &target, &attr); + if (err < 0) + return err; + + pp->bm_win_id = -1; + + /* Open NETA -> BM window */ + err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, + target, attr); + if (err < 0) { + netdev_info(pp->dev, "fail to configure mbus window to BM\n"); + return err; + } + + if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { + netdev_info(pp->dev, "missing long pool id\n"); + return -EINVAL; + } + + /* Create port's long pool depending on mtu */ + pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, + MVNETA_BM_LONG, pp->id, + MVNETA_RX_PKT_SIZE(pp->dev->mtu)); + if (!pp->pool_long) { + netdev_info(pp->dev, "fail to obtain long pool for port\n"); + return -ENOMEM; + } + + pp->pool_long->port_map |= 1 << pp->id; + + mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, + pp->pool_long->id); + + /* If short pool id is not defined, assume using single pool */ + if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) + short_pool_id = long_pool_id; + + /* Create port's short pool */ + pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, + MVNETA_BM_SHORT, pp->id, + MVNETA_BM_SHORT_PKT_SIZE); + if (!pp->pool_short) { + netdev_info(pp->dev, "fail to obtain short pool for port\n"); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + return -ENOMEM; + } + + if (short_pool_id != long_pool_id) { + pp->pool_short->port_map |= 1 << pp->id; + mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, + pp->pool_short->id); + } + + return 0; +} + +/* Update settings of a pool for bigger packets */ +static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) +{ + struct mvneta_bm_pool *bm_pool = pp->pool_long; + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + int num; + + /* Release all buffers from long pool */ + mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); + if (hwbm_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", + bm_pool->id); + goto bm_mtu_err; + } + + bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); + bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); + hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); + + /* Fill entire long pool */ + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + bm_pool->id, num, hwbm_pool->size); + goto bm_mtu_err; + } + mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); + + return; + +bm_mtu_err: + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + + pp->bm_priv = NULL; + mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); + netdev_info(pp->dev, "fail to update MTU, fall back to 
software BM\n"); +} + /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { @@ -868,14 +1107,14 @@ static void mvneta_port_down(struct mvneta_port *pp) do { if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, - "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n", + "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", val); break; } mdelay(1); val = mvreg_read(pp, MVNETA_RXQ_CMD); - } while (val & 0xff); + } while (val & MVNETA_RXQ_ENABLE_MASK); /* Stop Tx port activity. Check port Tx activity. Issue stop * command for active channels only @@ -900,14 +1139,14 @@ static void mvneta_port_down(struct mvneta_port *pp) /* Check TX Command reg that all Txqs are stopped */ val = mvreg_read(pp, MVNETA_TXQ_CMD); - } while (val & 0xff); + } while (val & MVNETA_TXQ_ENABLE_MASK); /* Double check to verify that TX FIFO is empty */ count = 0; do { if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { netdev_warn(pp->dev, - "TX FIFO empty timeout status=0x08%x\n", + "TX FIFO empty timeout status=0x%08x\n", val); break; } @@ -1038,6 +1277,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) } } +static void mvneta_percpu_unmask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are unmasked, but actually only the ones + * mapped to this CPU will be unmasked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK_ALL | + MVNETA_TX_INTR_MASK_ALL | + MVNETA_MISCINTR_INTR_MASK); +} + +static void mvneta_percpu_mask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are masked, but actually only the ones + * mapped to this CPU will be masked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +} + +static void mvneta_percpu_clear_intr_cause(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are cleared, but actually only the ones + * mapped to this CPU will be cleared + */ + mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); +} + /* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. 
@@ -1055,14 +1331,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp) int max_cpu = num_present_cpus(); /* Clear all Cause registers */ - mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_ENABLE, 0); /* Enable MBUS Retry bit16 */ @@ -1111,9 +1383,17 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); /* Set Port Acceleration Mode */ - val = MVNETA_ACC_MODE_EXT; + if (pp->bm_priv) + /* HW buffer management + legacy parser */ + val = MVNETA_ACC_MODE_EXT2; + else + /* SW buffer management + legacy parser */ + val = MVNETA_ACC_MODE_EXT1; mvreg_write(pp, MVNETA_ACC_MODE, val); + if (pp->bm_priv) + mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); + /* Update val of portCfg register accordingly with all RxQueue types */ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); @@ -1480,23 +1760,25 @@ static void mvneta_txq_done(struct mvneta_port *pp, } } -static void *mvneta_frag_alloc(const struct mvneta_port *pp) +void *mvneta_frag_alloc(unsigned int frag_size) { - if (likely(pp->frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(pp->frag_size); + if (likely(frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(frag_size); else - return kmalloc(pp->frag_size, GFP_ATOMIC); + return kmalloc(frag_size, GFP_ATOMIC); } +EXPORT_SYMBOL_GPL(mvneta_frag_alloc); -static void mvneta_frag_free(const struct mvneta_port *pp, void *data) +void mvneta_frag_free(unsigned int frag_size, void *data) { - if (likely(pp->frag_size <= PAGE_SIZE)) + if (likely(frag_size <= PAGE_SIZE)) skb_free_frag(data); else kfree(data); } +EXPORT_SYMBOL_GPL(mvneta_frag_free); -/* Refill processing */ +/* Refill processing for SW buffer management */ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc) @@ -1504,7 +1786,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, dma_addr_t phys_addr; void *data; - data = mvneta_frag_alloc(pp); + data = mvneta_frag_alloc(pp->frag_size); if (!data) return -ENOMEM; @@ -1512,7 +1794,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { - mvneta_frag_free(pp, data); + mvneta_frag_free(pp->frag_size, data); return -ENOMEM; } @@ -1558,22 +1840,156 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, int rx_done, i; rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + if (rx_done) + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + if (pp->bm_priv) { + for (i = 0; i < rx_done; i++) { + struct mvneta_rx_desc *rx_desc = + mvneta_rxq_next_desc_get(rxq); + u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + struct mvneta_bm_pool *bm_pool; + + bm_pool = &pp->bm_priv->bm_pools[pool_id]; + /* Return dropped buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + } + return; + } + for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = (void *)rx_desc->buf_cookie; dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - 
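/* Which pool a dropped buffer goes back to is read from the descriptor
 * itself: bits 13..14 of the status word, i.e.
 *   pool_id = (status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT;
 * (presumably what MVNETA_RX_GET_BM_POOL_ID() expands to). With HWBM the
 * buffer's physical address is simply written back to that pool, while the
 * SWBM branch below unmaps and frees each per-descriptor fragment instead.
 */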
mvneta_frag_free(pp, data); + mvneta_frag_free(pp->frag_size, data); } +} - if (rx_done) - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); +/* Main rx processing when using software buffer management */ +static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) +{ + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); + struct net_device *dev = pp->dev; + int rx_done; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets */ + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + + if (rx_todo > rx_done) + rx_todo = rx_done; + + rx_done = 0; + + /* Fairness NAPI loop */ + while (rx_done < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct sk_buff *skb; + unsigned char *data; + dma_addr_t phys_addr; + u32 rx_status, frag_size; + int rx_bytes, err; + + rx_done++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); + data = (unsigned char *)rx_desc->buf_cookie; + phys_addr = rx_desc->buf_phys_addr; + + if (!mvneta_rxq_desc_is_first_last(rx_status) || + (rx_status & MVNETA_RXD_ERR_SUMMARY)) { +err_drop_frame: + dev->stats.rx_errors++; + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ + continue; + } + + if (rx_bytes <= rx_copybreak) { + /* better copy a small frame and not unmap the DMA region */ + skb = netdev_alloc_skb_ip_align(dev, rx_bytes); + if (unlikely(!skb)) + goto err_drop_frame; + + dma_sync_single_range_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes, + DMA_FROM_DEVICE); + memcpy(skb_put(skb, rx_bytes), + data + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + mvneta_rx_csum(pp, rx_status, skb); + napi_gro_receive(&port->napi, skb); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* leave the descriptor and buffer untouched */ + continue; + } + + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame; + } + + frag_size = pp->frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); + + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. 
+ */ + dma_unmap_single(dev->dev.parent, phys_addr, + MVNETA_RX_BUF_SIZE(pp->pkt_size), + DMA_FROM_DEVICE); + + if (!skb) + goto err_drop_frame; + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* Linux processing */ + skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + + mvneta_rx_csum(pp, rx_status, skb); + + napi_gro_receive(&port->napi, skb); + } + + if (rcvd_pkts) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + return rx_done; } -/* Main rx processing */ -static int mvneta_rx(struct mvneta_port *pp, int rx_todo, - struct mvneta_rx_queue *rxq) +/* Main rx processing when using hardware buffer management */ +static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) { struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; @@ -1592,21 +2008,29 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, /* Fairness NAPI loop */ while (rx_done < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct mvneta_bm_pool *bm_pool = NULL; struct sk_buff *skb; unsigned char *data; dma_addr_t phys_addr; - u32 rx_status; + u32 rx_status, frag_size; int rx_bytes, err; + u8 pool_id; rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; phys_addr = rx_desc->buf_phys_addr; + pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + bm_pool = &pp->bm_priv->bm_pools[pool_id]; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { - err_drop_frame: +err_drop_frame_ret_pool: + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); +err_drop_frame: dev->stats.rx_errors++; mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ @@ -1617,7 +2041,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, /* better copy a small frame and not unmap the DMA region */ skb = netdev_alloc_skb_ip_align(dev, rx_bytes); if (unlikely(!skb)) - goto err_drop_frame; + goto err_drop_frame_ret_pool; dma_sync_single_range_for_cpu(dev->dev.parent, rx_desc->buf_phys_addr, @@ -1635,26 +2059,31 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rcvd_pkts++; rcvd_bytes += rx_bytes; + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + /* leave the descriptor and buffer untouched */ continue; } /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc); + err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); rxq->missed++; - goto err_drop_frame; + goto err_drop_frame_ret_pool; } - skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); + frag_size = bm_pool->hwbm_pool.frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); /* After refill old buffer has to be unmapped regardless * the skb is successfully built or not. 
*/ - dma_unmap_single(dev->dev.parent, phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - + dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); if (!skb) goto err_drop_frame; @@ -2259,7 +2688,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) if (rx_queue) { rx_queue = rx_queue - 1; - rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); + if (pp->bm_priv) + rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); + else + rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } budget -= rx_done; @@ -2348,9 +2780,17 @@ static int mvneta_rxq_init(struct mvneta_port *pp, mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); - /* Fill RXQ with buffers from RX pool */ - mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); - mvneta_rxq_bm_disable(pp, rxq); + if (!pp->bm_priv) { + /* Fill RXQ with buffers from RX pool */ + mvneta_rxq_buf_size_set(pp, rxq, + MVNETA_RX_BUF_SIZE(pp->pkt_size)); + mvneta_rxq_bm_disable(pp, rxq); + } else { + mvneta_rxq_bm_enable(pp, rxq); + mvneta_rxq_long_pool_set(pp, rxq); + mvneta_rxq_short_pool_set(pp, rxq); + } + mvneta_rxq_fill(pp, rxq, rxq->size); return 0; @@ -2528,34 +2968,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static void mvneta_percpu_unmask_interrupt(void *arg) -{ - struct mvneta_port *pp = arg; - - /* All the queue are unmasked, but actually only the ones - * maped to this CPU will be unmasked - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK_ALL | - MVNETA_TX_INTR_MASK_ALL | - MVNETA_MISCINTR_INTR_MASK); -} - -static void mvneta_percpu_mask_interrupt(void *arg) -{ - struct mvneta_port *pp = arg; - - /* All the queue are masked, but actually only the ones - * maped to this CPU will be masked - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); -} - static void mvneta_start_dev(struct mvneta_port *pp) { - unsigned int cpu; + int cpu; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -2564,16 +2979,15 @@ static void mvneta_start_dev(struct mvneta_port *pp) mvneta_port_enable(pp); /* Enable polling on the port */ - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_enable(&port->napi); } /* Unmask interrupts. 
It has to be done from each CPU */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, - pp, true); + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2589,7 +3003,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp) phy_stop(pp->phy_dev); - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_disable(&port->napi); @@ -2604,13 +3018,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp) mvneta_port_disable(pp); /* Clear all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); - mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); + on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvneta_tx_reset(pp); mvneta_rx_reset(pp); @@ -2652,6 +3063,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) dev->mtu = mtu; if (!netif_running(dev)) { + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + netdev_update_features(dev); return 0; } @@ -2664,6 +3078,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mvneta_cleanup_txqs(pp); mvneta_cleanup_rxqs(pp); + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -2847,11 +3264,20 @@ static void mvneta_percpu_disable(void *arg) disable_percpu_irq(pp->dev->irq); } +/* Electing a CPU must be done in an atomic way: it should be done + * after or before the removal/insertion of a CPU and this function is + * not reentrant. + */ static void mvneta_percpu_elect(struct mvneta_port *pp) { - int online_cpu_idx, max_cpu, cpu, i = 0; + int elected_cpu = 0, max_cpu, cpu, i = 0; + + /* Use the cpu associated to the rxq when it is online, in all + * the other cases, use the cpu 0 which can't be offline. + */ + if (cpu_online(pp->rxq_def)) + elected_cpu = pp->rxq_def; - online_cpu_idx = pp->rxq_def % num_online_cpus(); max_cpu = num_present_cpus(); for_each_online_cpu(cpu) { @@ -2862,7 +3288,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) if ((rxq % max_cpu) == cpu) rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); - if (i == online_cpu_idx) + if (cpu == elected_cpu) /* Map the default receive queue queue to the * elected CPU */ @@ -2873,7 +3299,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) * the CPU bound to the default RX queue */ if (txq_number == 1) - txq_map = (i == online_cpu_idx) ? + txq_map = (cpu == elected_cpu) ? MVNETA_CPU_TXQ_ACCESS(1) : 0; else txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & @@ -2902,6 +3328,16 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + spin_lock(&pp->lock); + /* Configuring the driver for a new CPU while the + * driver is stopping is racy, so just avoid it. 
+ */ + if (pp->is_stopped) { + spin_unlock(&pp->lock); + break; + } netif_tx_stop_all_queues(pp->dev); /* We have to synchronise on tha napi of each CPU @@ -2917,9 +3353,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, } /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); napi_enable(&port->napi); @@ -2934,27 +3368,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, */ mvneta_percpu_elect(pp); - /* Unmask all ethernet port interrupts, as this - * notifier is called for each CPU then the CPU to - * Queue mapping is applied - */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE); netif_tx_start_all_queues(pp->dev); + spin_unlock(&pp->lock); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: netif_tx_stop_all_queues(pp->dev); + /* Thanks to this lock we are sure that any pending + * cpu election is done + */ + spin_lock(&pp->lock); /* Mask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); - mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + spin_unlock(&pp->lock); napi_synchronize(&port->napi); napi_disable(&port->napi); @@ -2968,12 +3400,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, case CPU_DEAD: case CPU_DEAD_FROZEN: /* Check if a new CPU must be elected now this on is down */ + spin_lock(&pp->lock); mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); /* Unmask all ethernet port interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2988,7 +3419,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, static int mvneta_open(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int ret, cpu; + int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + @@ -3010,22 +3441,12 @@ static int mvneta_open(struct net_device *dev) goto err_cleanup_txqs; } - /* Even though the documentation says that request_percpu_irq - * doesn't enable the interrupts automatically, it actually - * does so on the local CPU. - * - * Make sure it's disabled. - */ - mvneta_percpu_disable(pp); - /* Enable per-CPU interrupt on all the CPU to handle our RX * queue interrupts */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_enable, - pp, true); - + on_each_cpu(mvneta_percpu_enable, pp, true); + pp->is_stopped = false; /* Register a CPU notifier to handle the case where our CPU * might be taken offline. */ @@ -3057,13 +3478,20 @@ err_cleanup_rxqs: static int mvneta_stop(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int cpu; + + /* Inform that we are stopping so we don't want to setup the + * driver for new CPUs in the notifiers. 
The code of the + * notifier for CPU online is protected by the same spinlock, + * so when we get the lock, the notifer work is done. + */ + spin_lock(&pp->lock); + pp->is_stopped = true; + spin_unlock(&pp->lock); mvneta_stop_dev(pp); mvneta_mdio_remove(pp); unregister_cpu_notifier(&pp->cpu_notifier); - for_each_present_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); + on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); @@ -3312,9 +3740,7 @@ static int mvneta_config_rss(struct mvneta_port *pp) netif_tx_stop_all_queues(pp->dev); - for_each_online_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, - pp, true); + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { @@ -3335,7 +3761,9 @@ static int mvneta_config_rss(struct mvneta_port *pp) mvreg_write(pp, MVNETA_PORT_CONFIG, val); /* Update the elected CPU matching the new rxq_def */ + spin_lock(&pp->lock); mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { @@ -3539,6 +3967,7 @@ static int mvneta_probe(struct platform_device *pdev) struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *phy_node; + struct device_node *bm_node; struct mvneta_port *pp; struct net_device *dev; const char *dt_mac_addr; @@ -3594,6 +4023,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->ethtool_ops = &mvneta_eth_tool_ops; pp = netdev_priv(dev); + spin_lock_init(&pp->lock); pp->phy_node = phy_node; pp->phy_interface = phy_mode; @@ -3672,26 +4102,39 @@ static int mvneta_probe(struct platform_device *pdev) pp->tx_csum_limit = tx_csum_limit; + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvneta_conf_mbus_windows(pp, dram_target_info); + pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; pp->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); + pp->id = global_port_id++; + + /* Obtain access to BM resources if enabled and already initialized */ + bm_node = of_parse_phandle(dn, "buffer-manager", 0); + if (bm_node && bm_node->data) { + pp->bm_priv = bm_node->data; + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, "use SW buffer management\n"); + pp->bm_priv = NULL; + } + } + err = mvneta_init(&pdev->dev, pp); if (err < 0) - goto err_free_stats; + goto err_netdev; err = mvneta_port_power_up(pp, phy_mode); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); - goto err_free_stats; + goto err_netdev; } - dram_target_info = mv_mbus_dram_info(); - if (dram_target_info) - mvneta_conf_mbus_windows(pp, dram_target_info); - for_each_present_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); @@ -3702,7 +4145,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; - dev->priv_flags |= IFF_UNICAST_FLT; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; err = register_netdev(dev); @@ -3726,6 +4169,13 @@ static int mvneta_probe(struct platform_device *pdev) return 0; +err_netdev: + unregister_netdev(dev); + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + } err_free_stats: 
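/* On a probe failure after BM setup, the new err_netdev label above first
 * hands the long and short pools back to the BM driver; as shown later in
 * mvneta_bm_pool_destroy(), a pool is only truly freed once no port's bit
 * remains in its port_map, so a pool shared with another port survives this
 * unwind. Execution then falls through to the existing cleanup below.
 */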
free_percpu(pp->stats); err_free_ports: @@ -3757,6 +4207,12 @@ static int mvneta_remove(struct platform_device *pdev) of_node_put(pp->phy_node); free_netdev(dev); + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + } + return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c new file mode 100644 index 000000000000..01fccec632ec --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -0,0 +1,487 @@ +/* + * Driver for Marvell NETA network controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/hwbm.h> +#include "mvneta_bm.h" + +#define MVNETA_BM_DRIVER_NAME "mvneta_bm" +#define MVNETA_BM_DRIVER_VERSION "1.0" + +static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) +{ + writel(data, priv->reg_base + offset); +} + +static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) +{ + return readl(priv->reg_base + offset); +} + +static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val |= MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); +} + +static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val &= ~MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); +} + +static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val |= mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val &= ~mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, + u8 target_id, u8 attr) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); + val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); + val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); + val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); + val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); + + mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); +} + +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) +{ + struct mvneta_bm_pool *bm_pool = + (struct mvneta_bm_pool *)hwbm_pool->priv; + struct mvneta_bm *priv = bm_pool->priv; + dma_addr_t phys_addr; + + /* In order to update buf_cookie field of RX descriptor properly, + * BM hardware expects buf virtual address to be placed in the + * first four bytes of mapped buffer. 
+ */ + *(u32 *)buf = (u32)buf; + phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) + return -ENOMEM; + + mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); + return 0; +} +EXPORT_SYMBOL_GPL(mvneta_bm_construct); + +/* Create pool */ +static int mvneta_bm_pool_create(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + struct platform_device *pdev = priv->pdev; + u8 target_id, attr; + int size_bytes, err; + size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, + &bm_pool->phys_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, + &attr); + if (err < 0) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + return err; + } + + /* Set pool address */ + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), + bm_pool->phys_addr); + + mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); + mvneta_bm_pool_enable(priv, bm_pool->id); + + return 0; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size) +{ + struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; + int num, err; + + if (new_pool->type == MVNETA_BM_LONG && + new_pool->port_map != 1 << port_id) { + dev_err(&priv->pdev->dev, + "long pool cannot be shared by the ports\n"); + return NULL; + } + + if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { + dev_err(&priv->pdev->dev, + "mixing pools' types between the ports is forbidden\n"); + return NULL; + } + + if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) + new_pool->pkt_size = pkt_size; + + /* Allocate buffers in case BM pool hasn't been used yet */ + if (new_pool->type == MVNETA_BM_FREE) { + struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; + + new_pool->priv = priv; + new_pool->type = type; + new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); + hwbm_pool->frag_size = + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + hwbm_pool->construct = mvneta_bm_construct; + hwbm_pool->priv = new_pool; + + /* Create new pool */ + err = mvneta_bm_pool_create(priv, new_pool); + if (err) { + dev_err(&priv->pdev->dev, "fail to create pool %d\n", + new_pool->id); + return NULL; + } + + /* Allocate buffers for this pool */ + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, hwbm_pool->size); + return NULL; + } + } + + return new_pool; +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); + +/* Free all buffers from the pool */ +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) +{ + int i; + + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) { + dma_addr_t 
buf_phys_addr; + u32 *vaddr; + + /* Get buffer physical address (indirect access) */ + buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); + + /* Work-around to the problems when destroying the pool, + * when it occurs that a read access to BPPI returns 0. + */ + if (buf_phys_addr == 0) + continue; + + vaddr = phys_to_virt(buf_phys_addr); + if (!vaddr) + break; + + dma_unmap_single(&priv->pdev->dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); + } + + mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->hwbm_pool.buf_num -= i; +} +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); + +/* Cleanup pool */ +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) +{ + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + bm_pool->type = MVNETA_BM_FREE; + + mvneta_bm_bufs_free(priv, bm_pool, port_map); + if (hwbm_pool->buf_num) + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + + if (bm_pool->virt_addr) { + dma_free_coherent(&priv->pdev->dev, + sizeof(u32) * hwbm_pool->size, + bm_pool->virt_addr, bm_pool->phys_addr); + bm_pool->virt_addr = NULL; + } + + mvneta_bm_pool_disable(priv, bm_pool->id); +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); + +static void mvneta_bm_pools_init(struct mvneta_bm *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct mvneta_bm_pool *bm_pool; + char prop[15]; + u32 size; + int i; + + /* Activate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); + + /* Create all pools with maximum size */ + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + bm_pool->type = MVNETA_BM_FREE; + + /* Reset read pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); + + /* Reset write pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); + + /* Configure pool size according to DT or use default value */ + sprintf(prop, "pool%d,capacity", i); + if (of_property_read_u32(dn, prop, &size)) { + size = MVNETA_BM_POOL_CAP_DEF; + } else if (size > MVNETA_BM_POOL_CAP_MAX) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MAX); + size = MVNETA_BM_POOL_CAP_MAX; + } else if (size < MVNETA_BM_POOL_CAP_MIN) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MIN); + size = MVNETA_BM_POOL_CAP_MIN; + } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, round to %d\n", + i, size, ALIGN(size, + MVNETA_BM_POOL_CAP_ALIGN)); + size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); + } + bm_pool->hwbm_pool.size = size; + + mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), + bm_pool->hwbm_pool.size); + + /* Obtain custom pkt_size from DT */ + sprintf(prop, "pool%d,pkt-size", i); + if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) + bm_pool->pkt_size = 0; + } +} + +static void mvneta_bm_default_set(struct mvneta_bm *priv) +{ + u32 val; + + /* Mask BM all interrupts */ + mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); + + /* Set BM configuration register */ + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + + /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */ + val &= 
~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; + val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static int mvneta_bm_init(struct mvneta_bm *priv) +{ + mvneta_bm_default_set(priv); + + /* Allocate and initialize BM pools structures */ + priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, + sizeof(struct mvneta_bm_pool), + GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + mvneta_bm_pools_init(priv); + + return 0; +} + +static int mvneta_bm_get_sram(struct device_node *dn, + struct mvneta_bm *priv) +{ + priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); + if (!priv->bppi_pool) + return -ENOMEM; + + priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, + MVNETA_BM_BPPI_SIZE, + &priv->bppi_phys_addr); + if (!priv->bppi_virt_addr) + return -ENOMEM; + + return 0; +} + +static void mvneta_bm_put_sram(struct mvneta_bm *priv) +{ + gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, + MVNETA_BM_BPPI_SIZE); +} + +static int mvneta_bm_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct mvneta_bm *priv; + struct resource *res; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->reg_base)) + return PTR_ERR(priv->reg_base); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err < 0) + return err; + + err = mvneta_bm_get_sram(dn, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to allocate internal memory\n"); + goto err_clk; + } + + priv->pdev = pdev; + + /* Initialize buffer manager internals */ + err = mvneta_bm_init(priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_sram; + } + + dn->data = priv; + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); + + return 0; + +err_sram: + mvneta_bm_put_sram(priv); +err_clk: + clk_disable_unprepare(priv->clk); + return err; +} + +static int mvneta_bm_remove(struct platform_device *pdev) +{ + struct mvneta_bm *priv = platform_get_drvdata(pdev); + u8 all_ports_map = 0xff; + int i = 0; + + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); + } + + mvneta_bm_put_sram(priv); + + /* Dectivate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static const struct of_device_id mvneta_bm_match[] = { + { .compatible = "marvell,armada-380-neta-bm" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvneta_bm_match); + +static struct platform_driver mvneta_bm_driver = { + .probe = mvneta_bm_probe, + .remove = mvneta_bm_remove, + .driver = { + .name = MVNETA_BM_DRIVER_NAME, + .of_match_table = mvneta_bm_match, + }, +}; + +module_platform_driver(mvneta_bm_driver); + +MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h new file mode 100644 index 000000000000..e74fd44a92f7 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -0,0 +1,182 @@ +/* + * Driver for Marvell NETA network 
controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVNETA_BM_H_ +#define _MVNETA_BM_H_ + +/* BM Configuration Register */ +#define MVNETA_BM_CONFIG_REG 0x0 +#define MVNETA_BM_STATUS_MASK 0x30 +#define MVNETA_BM_ACTIVE_MASK BIT(4) +#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000 +#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18) +#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19) + +/* BM Activation Register */ +#define MVNETA_BM_COMMAND_REG 0x4 +#define MVNETA_BM_START_MASK BIT(0) +#define MVNETA_BM_STOP_MASK BIT(1) +#define MVNETA_BM_PAUSE_MASK BIT(2) + +/* BM Xbar interface Register */ +#define MVNETA_BM_XBAR_01_REG 0x8 +#define MVNETA_BM_XBAR_23_REG 0xc +#define MVNETA_BM_XBAR_POOL_REG(pool) \ + (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG) +#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0) +#define MVNETA_BM_TARGET_ID_MASK(pool) \ + (0xf << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_TARGET_ID_VAL(pool, id) \ + ((id) << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4) +#define MVNETA_BM_XBAR_ATTR_MASK(pool) \ + (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \ + ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool)) + +/* Address of External Buffer Pointers Pool Register */ +#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4)) +#define MVNETA_BM_POOL_ENABLE_MASK BIT(0) + +/* External Buffer Pointers Pool RD pointer Register */ +#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool WR pointer */ +#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0 +#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool Size Register */ +#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4)) +#define MVNETA_BM_POOL_SIZE_MASK 0x3fff + +/* BM Interrupt Cause Register */ +#define MVNETA_BM_INTR_CAUSE_REG (0x50) + +/* BM interrupt Mask Register */ +#define MVNETA_BM_INTR_MASK_REG (0x54) + +/* Other definitions */ +#define MVNETA_BM_SHORT_PKT_SIZE 256 +#define MVNETA_BM_POOLS_NUM 4 +#define MVNETA_BM_POOL_CAP_MIN 128 +#define MVNETA_BM_POOL_CAP_DEF 2048 +#define MVNETA_BM_POOL_CAP_MAX \ + (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN) +#define MVNETA_BM_POOL_CAP_ALIGN 32 +#define MVNETA_BM_POOL_PTR_ALIGN 32 + +#define MVNETA_BM_POOL_ACCESS_OFFS 8 + +#define MVNETA_BM_BPPI_SIZE 0x100000 + +#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) + +enum mvneta_bm_type { + MVNETA_BM_FREE, + MVNETA_BM_LONG, + MVNETA_BM_SHORT +}; + +struct mvneta_bm { + void __iomem *reg_base; + struct clk *clk; + struct platform_device *pdev; + + struct gen_pool *bppi_pool; + /* BPPI virtual base address */ + void __iomem *bppi_virt_addr; + /* BPPI physical base address */ + dma_addr_t bppi_phys_addr; + + /* BM pools */ + struct mvneta_bm_pool *bm_pools; +}; + +struct mvneta_bm_pool { + struct hwbm_pool hwbm_pool; + /* Pool number in the range 0-3 */ + u8 id; + enum mvneta_bm_type 
type; + + /* Packet size */ + int pkt_size; + /* Size of the buffer acces through DMA*/ + u32 buf_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE physical base address */ + dma_addr_t phys_addr; + + /* Ports using BM pool */ + u8 port_map; + + struct mvneta_bm *priv; +}; + +/* Declarations and definitions */ +void *mvneta_frag_alloc(unsigned int frag_size); +void mvneta_frag_free(unsigned int frag_size, void *data); + +#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE) +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map); +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map); +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf); +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool); +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size); + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) +{ + writel_relaxed(buf_phys_addr, priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + return readl_relaxed(priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} +#else +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) {} +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; } +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) {return 0; } +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size) { return NULL; } + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) {} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ return 0; } +#endif /* CONFIG_MVNETA_BM */ +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a4beccf1fd46..c797971aefab 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) - return -1; + return -ENOMEM; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); pe->index = tid; @@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, if (pmap == 0) { if (add) { kfree(pe); - return -1; + return -EINVAL; } mvpp2_prs_hw_inv(priv, pe->index); priv->prs_shadow[pe->index].valid = false; diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig new file mode 100644 index 000000000000..698bb89aa901 --- /dev/null +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -0,0 +1,17 @@ +config NET_VENDOR_MEDIATEK + bool "MediaTek ethernet driver" + depends on ARCH_MEDIATEK + ---help--- + If you have a Mediatek SoC with ethernet, say Y. 
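A BM pool can be shared by several ports, so struct mvneta_bm_pool above keeps its users in the u8 port_map: mvneta.c sets the port's bit when it starts using a pool, and mvneta_bm_bufs_free()/mvneta_bm_pool_destroy() clear it and only release the buffers once no bit is left. A small self-contained sketch of that bitmap-as-refcount idea, with names local to the example rather than taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mark port_id as a user of the pool. */
static void pool_attach_port(uint8_t *port_map, uint8_t port_id)
{
        *port_map |= (uint8_t)(1u << port_id);
}

/* Drop port_id from the pool; returns true only when no user is left and
 * the pool's buffers may really be freed. */
static bool pool_detach_port(uint8_t *port_map, uint8_t port_id)
{
        *port_map &= (uint8_t)~(1u << port_id);
        return *port_map == 0;
}

int main(void)
{
        uint8_t map = 0;

        pool_attach_port(&map, 0);
        pool_attach_port(&map, 1);
        printf("free after detach(0)? %d\n", pool_detach_port(&map, 0)); /* 0 */
        printf("free after detach(1)? %d\n", pool_detach_port(&map, 1)); /* 1 */
        return 0;
}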
+ +if NET_VENDOR_MEDIATEK + +config NET_MEDIATEK_SOC + tristate "MediaTek MT7623 Gigabit ethernet support" + depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701) + select PHYLIB + ---help--- + This driver supports the gigabit ethernet MACs in the + MediaTek MT2701/MT7623 chipset family. + +endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile new file mode 100644 index 000000000000..aa3f1c8ccd4a --- /dev/null +++ b/drivers/net/ethernet/mediatek/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Mediatek SoCs built-in ethernet macs +# + +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c new file mode 100644 index 000000000000..7f2126b6a179 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -0,0 +1,1808 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/if_vlan.h> +#include <linux/reset.h> +#include <linux/tcp.h> + +#include "mtk_eth_soc.h" + +static int mtk_msg_level = -1; +module_param_named(msg_level, mtk_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +#define MTK_ETHTOOL_STAT(x) { #x, \ + offsetof(struct mtk_hw_stats, x) / sizeof(u64) } + +/* strings used by ethtool */ +static const struct mtk_ethtool_stats { + char str[ETH_GSTRING_LEN]; + u32 offset; +} mtk_ethtool_stats[] = { + MTK_ETHTOOL_STAT(tx_bytes), + MTK_ETHTOOL_STAT(tx_packets), + MTK_ETHTOOL_STAT(tx_skip), + MTK_ETHTOOL_STAT(tx_collisions), + MTK_ETHTOOL_STAT(rx_bytes), + MTK_ETHTOOL_STAT(rx_packets), + MTK_ETHTOOL_STAT(rx_overflow), + MTK_ETHTOOL_STAT(rx_fcs_errors), + MTK_ETHTOOL_STAT(rx_short_errors), + MTK_ETHTOOL_STAT(rx_long_errors), + MTK_ETHTOOL_STAT(rx_checksum_errors), + MTK_ETHTOOL_STAT(rx_flow_control_packets), +}; + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) +{ + __raw_writel(val, eth->base + reg); +} + +u32 mtk_r32(struct mtk_eth *eth, unsigned reg) +{ + return __raw_readl(eth->base + reg); +} + +static int mtk_mdio_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) + return 0; + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) + break; + usleep_range(10, 20); + } + + dev_err(eth->dev, "mdio: MDIO timeout\n"); + return -1; +} + +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, + u32 phy_register, u32 write_data) +{ + if (mtk_mdio_busy_wait(eth)) + return -1; + + write_data &= 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | + (phy_register << PHY_IAC_REG_SHIFT) | + (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data, + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return 
-1; + + return 0; +} + +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) +{ + u32 d; + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | + (phy_reg << PHY_IAC_REG_SHIFT) | + (phy_addr << PHY_IAC_ADDR_SHIFT), + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; + + return d; +} + +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, + int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write(eth, phy_addr, phy_reg, val); +} + +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_read(eth, phy_addr, phy_reg); +} + +static void mtk_phy_link_adjust(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | + MAC_MCR_BACKPR_EN; + + switch (mac->phy_dev->speed) { + case SPEED_1000: + mcr |= MAC_MCR_SPEED_1000; + break; + case SPEED_100: + mcr |= MAC_MCR_SPEED_100; + break; + }; + + if (mac->phy_dev->link) + mcr |= MAC_MCR_FORCE_LINK; + + if (mac->phy_dev->duplex) + mcr |= MAC_MCR_FORCE_DPX; + + if (mac->phy_dev->pause) + mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; + + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); + + if (mac->phy_dev->link) + netif_carrier_on(dev); + else + netif_carrier_off(dev); +} + +static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, + struct device_node *phy_node) +{ + const __be32 *_addr = NULL; + struct phy_device *phydev; + int phy_mode, addr; + + _addr = of_get_property(phy_node, "reg", NULL); + + if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) { + pr_err("%s: invalid phy address\n", phy_node->name); + return -EINVAL; + } + addr = be32_to_cpu(*_addr); + phy_mode = of_get_phy_mode(phy_node); + if (phy_mode < 0) { + dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); + return -EINVAL; + } + + phydev = of_phy_connect(eth->netdev[mac->id], phy_node, + mtk_phy_link_adjust, 0, phy_mode); + if (!phydev) { + dev_err(eth->dev, "could not connect to PHY\n"); + return -ENODEV; + } + + dev_info(eth->dev, + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", + mac->id, phydev_name(phydev), phydev->phy_id, + phydev->drv->name); + + mac->phy_dev = phydev; + + return 0; +} + +static int mtk_phy_connect(struct mtk_mac *mac) +{ + struct mtk_eth *eth = mac->hw; + struct device_node *np; + u32 val, ge_mode; + + np = of_parse_phandle(mac->of_node, "phy-handle", 0); + if (!np) + return -ENODEV; + + switch (of_get_phy_mode(np)) { + case PHY_INTERFACE_MODE_RGMII: + ge_mode = 0; + break; + case PHY_INTERFACE_MODE_MII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_RMII: + ge_mode = 2; + break; + default: + dev_err(eth->dev, "invalid phy_mode\n"); + return -1; + } + + /* put the gmac into the right mode */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + mtk_phy_connect_node(eth, mac, np); + mac->phy_dev->autoneg = AUTONEG_ENABLE; + mac->phy_dev->speed = 0; + mac->phy_dev->duplex = 0; + mac->phy_dev->supported &= PHY_BASIC_FEATURES; + mac->phy_dev->advertising = mac->phy_dev->supported | + ADVERTISED_Autoneg; + phy_start_aneg(mac->phy_dev); + + return 0; +} + +static int mtk_mdio_init(struct mtk_eth *eth) +{ + struct 
device_node *mii_np; + int err; + + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); + if (!mii_np) { + dev_err(eth->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + err = 0; + goto err_put_node; + } + + eth->mii_bus = mdiobus_alloc(); + if (!eth->mii_bus) { + err = -ENOMEM; + goto err_put_node; + } + + eth->mii_bus->name = "mdio"; + eth->mii_bus->read = mtk_mdio_read; + eth->mii_bus->write = mtk_mdio_write; + eth->mii_bus->priv = eth; + eth->mii_bus->parent = eth->dev; + + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + err = of_mdiobus_register(eth->mii_bus, mii_np); + if (err) + goto err_free_bus; + + return 0; + +err_free_bus: + kfree(eth->mii_bus); + +err_put_node: + of_node_put(mii_np); + eth->mii_bus = NULL; + return err; +} + +static void mtk_mdio_cleanup(struct mtk_eth *eth) +{ + if (!eth->mii_bus) + return; + + mdiobus_unregister(eth->mii_bus); + of_node_put(eth->mii_bus->dev.of_node); + kfree(eth->mii_bus); +} + +static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) +{ + u32 val; + + val = mtk_r32(eth, MTK_QDMA_INT_MASK); + mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + /* flush write */ + mtk_r32(eth, MTK_QDMA_INT_MASK); +} + +static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) +{ + u32 val; + + val = mtk_r32(eth, MTK_QDMA_INT_MASK); + mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + /* flush write */ + mtk_r32(eth, MTK_QDMA_INT_MASK); +} + +static int mtk_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + struct mtk_mac *mac = netdev_priv(dev); + const char *macaddr = dev->dev_addr; + unsigned long flags; + + if (ret) + return ret; + + spin_lock_irqsave(&mac->hw->page_lock, flags); + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + spin_unlock_irqrestore(&mac->hw->page_lock, flags); + + return 0; +} + +void mtk_stats_update_mac(struct mtk_mac *mac) +{ + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int base = MTK_GDM1_TX_GBCNT; + u64 stats; + + base += hw_stats->reg_offset; + + u64_stats_update_begin(&hw_stats->syncp); + + hw_stats->rx_bytes += mtk_r32(mac->hw, base); + stats = mtk_r32(mac->hw, base + 0x04); + if (stats) + hw_stats->rx_bytes += (stats << 32); + hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); + hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); + hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); + hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); + hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); + hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); + hw_stats->rx_flow_control_packets += + mtk_r32(mac->hw, base + 0x24); + hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); + hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); + hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); + stats = mtk_r32(mac->hw, base + 0x34); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); + u64_stats_update_end(&hw_stats->syncp); +} + +static void mtk_stats_update(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i] || !eth->mac[i]->hw_stats) + continue; + if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { + mtk_stats_update_mac(eth->mac[i]); + spin_unlock(ð->mac[i]->hw_stats->stats_lock); + } + } +} + 
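mtk_stats_update_mac() above widens the MAC's split hardware counters: each byte counter exposes a low 32-bit word and, in the next register, a high word that is folded in as (stats << 32), all inside a u64_stats_update_begin()/end() pair so the reader in mtk_get_stats64() below sees a consistent snapshot. A standalone sketch of just the widening step; the two read helpers are hypothetical stand-ins for mtk_r32():

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two MMIO reads done with mtk_r32() in the driver. */
static uint32_t read_counter_lo(void) { return 0x89abcdefu; }
static uint32_t read_counter_hi(void) { return 0x1u; } /* high 32 bits */

/* Combine a low/high register pair into one 64-bit counter value. */
static uint64_t read_counter64(void)
{
        uint64_t lo = read_counter_lo();
        uint64_t hi = read_counter_hi();

        return lo | (hi << 32);
}

int main(void)
{
        printf("counter = 0x%llx\n", (unsigned long long)read_counter64());
        return 0;
}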
+static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int start; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hw_stats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock(&hw_stats->stats_lock); + } + } + + do { + start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + storage->rx_packets = hw_stats->rx_packets; + storage->tx_packets = hw_stats->tx_packets; + storage->rx_bytes = hw_stats->rx_bytes; + storage->tx_bytes = hw_stats->tx_bytes; + storage->collisions = hw_stats->tx_collisions; + storage->rx_length_errors = hw_stats->rx_short_errors + + hw_stats->rx_long_errors; + storage->rx_over_errors = hw_stats->rx_overflow; + storage->rx_crc_errors = hw_stats->rx_fcs_errors; + storage->rx_errors = hw_stats->rx_checksum_errors; + storage->tx_aborted_errors = hw_stats->tx_skip; + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + + storage->tx_errors = dev->stats.tx_errors; + storage->rx_dropped = dev->stats.rx_dropped; + storage->tx_dropped = dev->stats.tx_dropped; + + return storage; +} + +static inline int mtk_max_frag_size(int mtu) +{ + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) + mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static inline int mtk_max_buf_size(int frag_size) +{ + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + WARN_ON(buf_size < MTK_MAX_RX_LENGTH); + + return buf_size; +} + +static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, + struct mtk_rx_dma *dma_rxd) +{ + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); +} + +/* the qdma core needs scratch memory to be setup */ +static int mtk_init_fq_dma(struct mtk_eth *eth) +{ + dma_addr_t phy_ring_head, phy_ring_tail; + int cnt = MTK_DMA_SIZE; + dma_addr_t dma_addr; + int i; + + eth->scratch_ring = dma_alloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + &phy_ring_head, + GFP_ATOMIC | __GFP_ZERO); + if (unlikely(!eth->scratch_ring)) + return -ENOMEM; + + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, + GFP_KERNEL); + dma_addr = dma_map_single(eth->dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); + phy_ring_tail = phy_ring_head + + (sizeof(struct mtk_tx_dma) * (cnt - 1)); + + for (i = 0; i < cnt; i++) { + eth->scratch_ring[i].txd1 = + (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); + if (i < cnt - 1) + eth->scratch_ring[i].txd2 = (phy_ring_head + + ((i + 1) * sizeof(struct mtk_tx_dma))); + eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); + } + + mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD); + mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); + mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); + + return 0; +} + +static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) +{ + void *ret = ring->dma; + + return ret + (desc - ring->phys); +} + +static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, + struct 
mtk_tx_dma *txd) +{ + int idx = txd - ring->dma; + + return &ring->buf[idx]; +} + +static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) +{ + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + dma_unmap_single(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + tx_buf->flags = 0; + if (tx_buf->skb && + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) + dev_kfree_skb_any(tx_buf->skb); + tx_buf->skb = NULL; +} + +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, + int tx_num, struct mtk_tx_ring *ring, bool gso) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_buf *tx_buf; + unsigned long flags; + dma_addr_t mapped_addr; + unsigned int nr_frags; + int i, n_desc = 1; + u32 txd4 = 0; + + itxd = ring->next_free; + if (itxd == ring->last_free) + return -ENOMEM; + + /* set the forward port */ + txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; + + tx_buf = mtk_desc_to_tx_buf(ring, itxd); + memset(tx_buf, 0, sizeof(*tx_buf)); + + if (gso) + txd4 |= TX_DMA_TSO; + + /* TX Checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + txd4 |= TX_DMA_CHKSUM; + + /* VLAN header offload */ + if (skb_vlan_tag_present(skb)) + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); + + mapped_addr = dma_map_single(&dev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + return -ENOMEM; + + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running ont he same ring so we need to lock + * the ring access + */ + spin_lock_irqsave(ð->page_lock, flags); + WRITE_ONCE(itxd->txd1, mapped_addr); + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); + + /* TX SG offload */ + txd = itxd; + nr_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + unsigned int offset = 0; + int frag_size = skb_frag_size(frag); + + while (frag_size) { + bool last_frag = false; + unsigned int frag_map_size; + + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + goto err_dma; + + if (i == nr_frags - 1 && + (frag_size - frag_map_size) == 0) + last_frag = true; + + WRITE_ONCE(txd->txd1, mapped_addr); + WRITE_ONCE(txd->txd3, (TX_DMA_SWC | + TX_DMA_PLEN0(frag_map_size) | + last_frag * TX_DMA_LS0) | + mac->id); + WRITE_ONCE(txd->txd4, 0); + + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + tx_buf = mtk_desc_to_tx_buf(ring, txd); + memset(tx_buf, 0, sizeof(*tx_buf)); + + tx_buf->flags |= MTK_TX_FLAGS_PAGE0; + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); + frag_size -= frag_map_size; + offset += frag_map_size; + } + } + + /* store skb to cleanup */ + tx_buf->skb = skb; + + WRITE_ONCE(itxd->txd4, txd4); + WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | + (!nr_frags * TX_DMA_LS0))); + + 
spin_unlock_irqrestore(ð->page_lock, flags); + + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); + + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); + atomic_sub(n_desc, &ring->free_count); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + + return 0; + +err_dma: + do { + tx_buf = mtk_desc_to_tx_buf(ring, txd); + + /* unmap dma */ + mtk_tx_unmap(&dev->dev, tx_buf); + + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + } while (itxd != txd); + + spin_unlock_irqrestore(ð->page_lock, flags); + + return -ENOMEM; +} + +static inline int mtk_cal_txd_req(struct sk_buff *skb) +{ + int i, nfrags; + struct skb_frag_struct *frag; + + nfrags = 1; + if (skb_is_gso(skb)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); + } + } else { + nfrags += skb_shinfo(skb)->nr_frags; + } + + return DIV_ROUND_UP(nfrags, 2); +} + +static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_ring *ring = ð->tx_ring; + struct net_device_stats *stats = &dev->stats; + bool gso = false; + int tx_num; + + tx_num = mtk_cal_txd_req(skb); + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { + netif_stop_queue(dev); + netif_err(eth, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) { + netif_warn(eth, tx_err, dev, + "GSO expand head fail.\n"); + goto drop; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + gso = true; + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); + } + } + + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) + goto drop; + + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { + netif_stop_queue(dev); + if (unlikely(atomic_read(&ring->free_count) > + ring->thresh)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +drop: + stats->tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int mtk_poll_rx(struct napi_struct *napi, int budget, + struct mtk_eth *eth, u32 rx_intr) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int idx = ring->calc_idx; + struct sk_buff *skb; + u8 *data, *new_data; + struct mtk_rx_dma *rxd, trxd; + int done = 0; + + while (done < budget) { + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; + int mac = 0; + + idx = NEXT_RX_DESP_IDX(idx); + rxd = &ring->dma[idx]; + data = ring->data[idx]; + + mtk_rx_get_desc(&trxd, rxd); + if (!(trxd.rxd2 & RX_DMA_DONE)) + break; + + /* find out which mac the packet come from. 
values start at 1 */ + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK; + mac--; + + netdev = eth->netdev[mac]; + + /* alloc new buffer */ + new_data = napi_alloc_frag(ring->frag_size); + if (unlikely(!new_data)) { + netdev->stats.rx_dropped++; + goto release_desc; + } + dma_addr = dma_map_single(ð->netdev[mac]->dev, + new_data + NET_SKB_PAD, + ring->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + skb_free_frag(new_data); + goto release_desc; + } + + /* receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { + put_page(virt_to_head_page(new_data)); + goto release_desc; + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + dma_unmap_single(&netdev->dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + skb->dev = netdev; + skb_put(skb, pktlen); + if (trxd.rxd4 & RX_DMA_L4_VALID) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + RX_DMA_VID(trxd.rxd3)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + RX_DMA_VID(trxd.rxd3)); + napi_gro_receive(napi, skb); + + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + +release_desc: + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + + ring->calc_idx = idx; + /* make sure that all changes to the dma ring are flushed before + * we continue + */ + wmb(); + mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0); + done++; + } + + if (done < budget) + mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS); + + return done; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + int total = 0, done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + u32 cpu, dma; + static int condition; + int i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); + dma = mtk_r32(eth, MTK_QTX_DRX_PTR); + + desc = mtk_qdma_phys_to_virt(ring, cpu); + + while ((cpu != dma) && budget) { + u32 next_cpu = desc->txd2; + int mac; + + desc = mtk_qdma_phys_to_virt(ring, desc->txd2); + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) + break; + + mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & + TX_DMA_FPORT_MASK; + mac--; + + tx_buf = mtk_desc_to_tx_buf(ring, desc); + skb = tx_buf->skb; + if (!skb) { + condition = 1; + break; + } + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[mac] += skb->len; + done[mac]++; + budget--; + } + mtk_tx_unmap(eth->dev, tx_buf); + + ring->last_free->txd2 = next_cpu; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = next_cpu; + } + + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || !done[i]) + continue; + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); + total += done[i]; + } + + /* read hw index again make sure no new tx packet */ + if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) + *tx_again = true; + else + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + + if (!total) + return 0; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || + unlikely(!netif_queue_stopped(eth->netdev[i]))) + continue; + if (atomic_read(&ring->free_count) > ring->thresh) + netif_wake_queue(eth->netdev[i]); + } + + return total; +} + +static int mtk_poll(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth 
= container_of(napi, struct mtk_eth, rx_napi); + u32 status, status2, mask, tx_intr, rx_intr, status_intr; + int tx_done, rx_done; + bool tx_again = false; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status2 = mtk_r32(eth, MTK_INT_STATUS2); + tx_intr = MTK_TX_DONE_INT; + rx_intr = MTK_RX_DONE_INT; + status_intr = (MTK_GDM1_AF | MTK_GDM2_AF); + tx_done = 0; + rx_done = 0; + tx_again = 0; + + if (status & tx_intr) + tx_done = mtk_poll_tx(eth, budget, &tx_again); + + if (status & rx_intr) + rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); + + if (unlikely(status2 & status_intr)) { + mtk_stats_update(eth); + mtk_w32(eth, status_intr, MTK_INT_STATUS2); + } + + if (unlikely(netif_msg_intr(eth))) { + mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + netdev_info(eth->netdev[0], + "done tx %d, rx %d, intr 0x%08x/0x%x\n", + tx_done, rx_done, status, mask); + } + + if (tx_again || rx_done == budget) + return budget; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (status & (tx_intr | rx_intr)) + return budget; + + napi_complete(napi); + mtk_irq_enable(eth, tx_intr | rx_intr); + + return rx_done; +} + +static int mtk_tx_alloc(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i, sz = sizeof(*ring->dma); + + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), + GFP_KERNEL); + if (!ring->buf) + goto no_tx_mem; + + ring->dma = dma_alloc_coherent(eth->dev, + MTK_DMA_SIZE * sz, + &ring->phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->dma) + goto no_tx_mem; + + memset(ring->dma, 0, MTK_DMA_SIZE * sz); + for (i = 0; i < MTK_DMA_SIZE; i++) { + int next = (i + 1) % MTK_DMA_SIZE; + u32 next_ptr = ring->phys + next * sz; + + ring->dma[i].txd2 = next_ptr; + ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + } + + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->next_free = &ring->dma[0]; + ring->last_free = &ring->dma[MTK_DMA_SIZE - 2]; + ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2, + MAX_SKB_FRAGS); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_DRX_PTR); + + return 0; + +no_tx_mem: + return -ENOMEM; +} + +static void mtk_tx_clean(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i; + + if (ring->buf) { + for (i = 0; i < MTK_DMA_SIZE; i++) + mtk_tx_unmap(eth->dev, &ring->buf[i]); + kfree(ring->buf); + ring->buf = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } +} + +static int mtk_rx_alloc(struct mtk_eth *eth) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int i; + + ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN); + ring->buf_size = mtk_max_buf_size(ring->frag_size); + ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data), + GFP_KERNEL); + if (!ring->data) + return -ENOMEM; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (!ring->data[i]) + return -ENOMEM; + } + + ring->dma = dma_alloc_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + &ring->phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + dma_addr_t dma_addr = dma_map_single(eth->dev, + ring->data[i] + NET_SKB_PAD, + ring->buf_size, + DMA_FROM_DEVICE); + if 
(unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + ring->dma[i].rxd1 = (unsigned int)dma_addr; + + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + } + ring->calc_idx = MTK_DMA_SIZE - 1; + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0); + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0); + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + + return 0; +} + +static void mtk_rx_clean(struct mtk_eth *eth) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int i; + + if (ring->data && ring->dma) { + for (i = 0; i < MTK_DMA_SIZE; i++) { + if (!ring->data[i]) + continue; + if (!ring->dma[i].rxd1) + continue; + dma_unmap_single(eth->dev, + ring->dma[i].rxd1, + ring->buf_size, + DMA_FROM_DEVICE); + skb_free_frag(ring->data[i]); + } + kfree(ring->data); + ring->data = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } +} + +/* wait for DMA to finish whatever it is doing before we start using it again */ +static int mtk_dma_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) + break; + } + + dev_err(eth->dev, "DMA init timeout\n"); + return -1; +} + +static int mtk_dma_init(struct mtk_eth *eth) +{ + int err; + + if (mtk_dma_busy_wait(eth)) + return -EBUSY; + + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + + err = mtk_tx_alloc(eth); + if (err) + return err; + + err = mtk_rx_alloc(eth); + if (err) + return err; + + /* Enable random early drop and set drop threshold automatically */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, + MTK_QDMA_FC_THRES); + mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + + return 0; +} + +static void mtk_dma_free(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) + if (eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + mtk_tx_clean(eth); + mtk_rx_clean(eth); + kfree(eth->scratch_head); +} + +static void mtk_tx_timeout(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + eth->netdev[mac->id]->stats.tx_errors++; + netif_err(eth, tx_err, dev, + "transmit timed out\n"); + schedule_work(&mac->pending_work); +} + +static irqreturn_t mtk_handle_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + u32 status; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (unlikely(!status)) + return IRQ_NONE; + + if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) { + if (likely(napi_schedule_prep(ð->rx_napi))) + __napi_schedule(ð->rx_napi); + } else { + mtk_w32(eth, status, MTK_QMTK_INT_STATUS); + } + mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT)); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mtk_poll_controller(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; + + mtk_irq_disable(eth, int_mask); + mtk_handle_irq(dev->irq, dev); + mtk_irq_enable(eth, int_mask); +} +#endif + +static int mtk_start_dma(struct mtk_eth *eth) +{ + int err; 
+ + err = mtk_dma_init(eth); + if (err) { + mtk_dma_free(eth); + return err; + } + + mtk_w32(eth, + MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | + MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); + + return 0; +} + +static int mtk_open(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + /* we run 2 netdevs on the same dma ring so we only bring it up once */ + if (!atomic_read(ð->dma_refcnt)) { + int err = mtk_start_dma(eth); + + if (err) + return err; + + napi_enable(ð->rx_napi); + mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + } + atomic_inc(ð->dma_refcnt); + + phy_start(mac->phy_dev); + netif_start_queue(dev); + + return 0; +} + +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) +{ + unsigned long flags; + u32 val; + int i; + + /* stop the dma engine */ + spin_lock_irqsave(ð->page_lock, flags); + val = mtk_r32(eth, glo_cfg); + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), + glo_cfg); + spin_unlock_irqrestore(ð->page_lock, flags); + + /* wait for dma stop */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, glo_cfg); + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { + msleep(20); + continue; + } + break; + } +} + +static int mtk_stop(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + netif_tx_disable(dev); + phy_stop(mac->phy_dev); + + /* only shutdown DMA if this is the last user */ + if (!atomic_dec_and_test(ð->dma_refcnt)) + return 0; + + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + napi_disable(ð->rx_napi); + + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + + mtk_dma_free(eth); + + return 0; +} + +static int __init mtk_hw_init(struct mtk_eth *eth) +{ + int err, i; + + /* reset the frame engine */ + reset_control_assert(eth->rstc); + usleep_range(10, 20); + reset_control_deassert(eth->rstc); + usleep_range(10, 20); + + /* Set GE2 driving and slew rate */ + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); + + /* set GE2 TDSEL */ + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); + + /* set GE2 TUNE */ + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + + /* GE1, Force 1000M/FD, FC ON */ + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); + + /* GE2, Force 1000M/FD, FC ON */ + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); + + /* Enable RX VLan Offloading */ + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + + err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, + dev_name(eth->dev), eth); + if (err) + return err; + + err = mtk_mdio_init(eth); + if (err) + return err; + + /* disable delay and normal interrupt */ + mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); + + /* FE int grouping */ + mtk_w32(eth, 0, MTK_FE_INT_GRP); + + for (i = 0; i < 2; i++) { + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + + /* setup the forward port to send frame to QDMA */ + val &= ~0xffff; + val |= 0x5555; + + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + + /* setup the mac dma */ + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } + + return 0; +} + +static int __init mtk_init(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + const char *mac_addr; + + mac_addr = of_get_mac_address(mac->of_node); + if (mac_addr) + ether_addr_copy(dev->dev_addr, mac_addr); + + /* If the mac address is invalid, use random mac 
address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + dev_err(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; + } + + return mtk_phy_connect(mac); +} + +static void mtk_uninit(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + phy_disconnect(mac->phy_dev); + mtk_mdio_cleanup(eth); + mtk_irq_disable(eth, ~0); + free_irq(dev->irq, dev); +} + +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(mac->phy_dev, ifr, cmd); + default: + break; + } + + return -EOPNOTSUPP; +} + +static void mtk_pending_work(struct work_struct *work) +{ + struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); + struct mtk_eth *eth = mac->hw; + struct net_device *dev = eth->netdev[mac->id]; + int err; + + rtnl_lock(); + mtk_stop(dev); + + err = mtk_open(dev); + if (err) { + netif_alert(eth, ifup, dev, + "Driver up/down cycle failed, closing device.\n"); + dev_close(dev); + } + rtnl_unlock(); +} + +static int mtk_cleanup(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + struct mtk_mac *mac = netdev_priv(eth->netdev[i]); + + if (!eth->netdev[i]) + continue; + + unregister_netdev(eth->netdev[i]); + free_netdev(eth->netdev[i]); + cancel_work_sync(&mac->pending_work); + } + + return 0; +} + +static int mtk_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + int err; + + err = phy_read_status(mac->phy_dev); + if (err) + return -ENODEV; + + return phy_ethtool_gset(mac->phy_dev, cmd); +} + +static int mtk_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + if (cmd->phy_address != mac->phy_dev->mdio.addr) { + mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus, + cmd->phy_address); + if (!mac->phy_dev) + return -ENODEV; + } + + return phy_ethtool_sset(mac->phy_dev, cmd); +} + +static void mtk_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + + strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); +} + +static u32 mtk_get_msglevel(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return mac->hw->msg_enable; +} + +static void mtk_set_msglevel(struct net_device *dev, u32 value) +{ + struct mtk_mac *mac = netdev_priv(dev); + + mac->hw->msg_enable = value; +} + +static int mtk_nway_reset(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return genphy_restart_aneg(mac->phy_dev); +} + +static u32 mtk_get_link(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + int err; + + err = genphy_update_link(mac->phy_dev); + if (err) + return ethtool_op_get_link(dev); + + return mac->phy_dev->link; +} + +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int mtk_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case 
ETH_SS_STATS: + return ARRAY_SIZE(mtk_ethtool_stats); + default: + return -EOPNOTSUPP; + } +} + +static void mtk_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hwstats = mac->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + int i; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hwstats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock(&hwstats->stats_lock); + } + } + + do { + data_src = (u64*)hwstats; + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); +} + +static struct ethtool_ops mtk_ethtool_ops = { + .get_settings = mtk_get_settings, + .set_settings = mtk_set_settings, + .get_drvinfo = mtk_get_drvinfo, + .get_msglevel = mtk_get_msglevel, + .set_msglevel = mtk_set_msglevel, + .nway_reset = mtk_nway_reset, + .get_link = mtk_get_link, + .get_strings = mtk_get_strings, + .get_sset_count = mtk_get_sset_count, + .get_ethtool_stats = mtk_get_ethtool_stats, +}; + +static const struct net_device_ops mtk_netdev_ops = { + .ndo_init = mtk_init, + .ndo_uninit = mtk_uninit, + .ndo_open = mtk_open, + .ndo_stop = mtk_stop, + .ndo_start_xmit = mtk_start_xmit, + .ndo_set_mac_address = mtk_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mtk_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_tx_timeout = mtk_tx_timeout, + .ndo_get_stats64 = mtk_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mtk_poll_controller, +#endif +}; + +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) +{ + struct mtk_mac *mac; + const __be32 *_id = of_get_property(np, "reg", NULL); + int id, err; + + if (!_id) { + dev_err(eth->dev, "missing mac id\n"); + return -EINVAL; + } + + id = be32_to_cpup(_id); + if (id >= MTK_MAC_COUNT) { + dev_err(eth->dev, "%d is not a valid mac id\n", id); + return -EINVAL; + } + + if (eth->netdev[id]) { + dev_err(eth->dev, "duplicate mac id found: %d\n", id); + return -EINVAL; + } + + eth->netdev[id] = alloc_etherdev(sizeof(*mac)); + if (!eth->netdev[id]) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + mac = netdev_priv(eth->netdev[id]); + eth->mac[id] = mac; + mac->id = id; + mac->hw = eth; + mac->of_node = np; + INIT_WORK(&mac->pending_work, mtk_pending_work); + + mac->hw_stats = devm_kzalloc(eth->dev, + sizeof(*mac->hw_stats), + GFP_KERNEL); + if (!mac->hw_stats) { + dev_err(eth->dev, "failed to allocate counter memory\n"); + err = -ENOMEM; + goto free_netdev; + } + spin_lock_init(&mac->hw_stats->stats_lock); + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + + SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->netdev_ops = &mtk_netdev_ops; + eth->netdev[id]->base_addr = (unsigned long)eth->base; + eth->netdev[id]->vlan_features = MTK_HW_FEATURES & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + eth->netdev[id]->features |= MTK_HW_FEATURES; + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; + + err = register_netdev(eth->netdev[id]); + if (err) { + dev_err(eth->dev, "error bringing up device\n"); + goto free_netdev; + } + eth->netdev[id]->irq = eth->irq; + netif_info(eth, probe, eth->netdev[id], + "mediatek frame engine at 0x%08lx, irq %d\n", + eth->netdev[id]->base_addr, eth->netdev[id]->irq); + + return 0; + +free_netdev: + 
free_netdev(eth->netdev[id]); + return err; +} + +static int mtk_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct device_node *mac_np; + const struct of_device_id *match; + struct mtk_soc_data *soc; + struct mtk_eth *eth; + int err; + + err = device_reset(&pdev->dev); + if (err) + return err; + + match = of_match_device(of_mtk_match, &pdev->dev); + soc = (struct mtk_soc_data *)match->data; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->base = devm_ioremap_resource(&pdev->dev, res); + if (!eth->base) + return -EADDRNOTAVAIL; + + spin_lock_init(ð->page_lock); + + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } + + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,pctl"); + if (IS_ERR(eth->pctl)) { + dev_err(&pdev->dev, "no pctl regmap found\n"); + return PTR_ERR(eth->pctl); + } + + eth->rstc = devm_reset_control_get(&pdev->dev, "eth"); + if (IS_ERR(eth->rstc)) { + dev_err(&pdev->dev, "no eth reset found\n"); + return PTR_ERR(eth->rstc); + } + + eth->irq = platform_get_irq(pdev, 0); + if (eth->irq < 0) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + return -ENXIO; + } + + eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); + eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); + eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); + eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); + if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) || + IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif)) + return -ENODEV; + + clk_prepare_enable(eth->clk_ethif); + clk_prepare_enable(eth->clk_esw); + clk_prepare_enable(eth->clk_gp1); + clk_prepare_enable(eth->clk_gp2); + + eth->dev = &pdev->dev; + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + + err = mtk_hw_init(eth); + if (err) + return err; + + for_each_child_of_node(pdev->dev.of_node, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + if (!of_device_is_available(mac_np)) + continue; + + err = mtk_add_mac(eth, mac_np); + if (err) + goto err_free_dev; + } + + /* we run 2 devices on the same DMA ring so we need a dummy device + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll, + MTK_NAPI_WEIGHT); + + platform_set_drvdata(pdev, eth); + + return 0; + +err_free_dev: + mtk_cleanup(eth); + return err; +} + +static int mtk_remove(struct platform_device *pdev) +{ + struct mtk_eth *eth = platform_get_drvdata(pdev); + + clk_disable_unprepare(eth->clk_ethif); + clk_disable_unprepare(eth->clk_esw); + clk_disable_unprepare(eth->clk_gp1); + clk_disable_unprepare(eth->clk_gp2); + + netif_napi_del(ð->rx_napi); + mtk_cleanup(eth); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +const struct of_device_id of_mtk_match[] = { + { .compatible = "mediatek,mt7623-eth" }, + {}, +}; + +static struct platform_driver mtk_driver = { + .probe = mtk_probe, + .remove = mtk_remove, + .driver = { + .name = "mtk_soc_eth", + .owner = THIS_MODULE, + .of_match_table = of_mtk_match, + }, +}; + +module_platform_driver(mtk_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h new file 
mode 100644 index 000000000000..48a5292c8ed8 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -0,0 +1,421 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> + */ + +#ifndef MTK_ETH_H +#define MTK_ETH_H + +#define MTK_QDMA_PAGE_SIZE 2048 +#define MTK_MAX_RX_LENGTH 1536 +#define MTK_TX_DMA_BUF_LEN 0x3fff +#define MTK_DMA_SIZE 256 +#define MTK_NAPI_WEIGHT 64 +#define MTK_MAC_COUNT 2 +#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) +#define MTK_DMA_DUMMY_DESC 0xffffffff +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_IPV6_CSUM) +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1)) + +/* Frame Engine Global Reset Register */ +#define MTK_RST_GL 0x04 +#define RST_GL_PSE BIT(0) + +/* Frame Engine Interrupt Status Register */ +#define MTK_INT_STATUS2 0x08 +#define MTK_GDM1_AF BIT(28) +#define MTK_GDM2_AF BIT(29) + +/* Frame Engine Interrupt Grouping Register */ +#define MTK_FE_INT_GRP 0x20 + +/* CDMP Exgress Control Register */ +#define MTK_CDMP_EG_CTRL 0x404 + +/* GDM Exgress Control Register */ +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_ICS_EN BIT(22) +#define MTK_GDMA_TCS_EN BIT(21) +#define MTK_GDMA_UCS_EN BIT(20) + +/* Unicast Filter MAC Address Register - Low */ +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) + +/* Unicast Filter MAC Address Register - High */ +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) + +/* QDMA TX Queue Configuration Registers */ +#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) +#define QDMA_RES_THRES 4 + +/* QDMA TX Queue Scheduler Registers */ +#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) + +/* QDMA RX Base Pointer Register */ +#define MTK_QRX_BASE_PTR0 0x1900 + +/* QDMA RX Maximum Count Register */ +#define MTK_QRX_MAX_CNT0 0x1904 + +/* QDMA RX CPU Pointer Register */ +#define MTK_QRX_CRX_IDX0 0x1908 + +/* QDMA RX DMA Pointer Register */ +#define MTK_QRX_DRX_IDX0 0x190C + +/* QDMA Global Configuration Register */ +#define MTK_QDMA_GLO_CFG 0x1A04 +#define MTK_RX_2B_OFFSET BIT(31) +#define MTK_RX_BT_32DWORDS (3 << 11) +#define MTK_TX_WB_DDONE BIT(6) +#define MTK_DMA_SIZE_16DWORDS (2 << 4) +#define MTK_RX_DMA_BUSY BIT(3) +#define MTK_TX_DMA_BUSY BIT(1) +#define MTK_RX_DMA_EN BIT(2) +#define MTK_TX_DMA_EN BIT(0) +#define MTK_DMA_BUSY_TIMEOUT HZ + +/* QDMA Reset Index Register */ +#define MTK_QDMA_RST_IDX 0x1A08 +#define MTK_PST_DRX_IDX0 BIT(16) + +/* QDMA Delay Interrupt Register */ +#define MTK_QDMA_DELAY_INT 0x1A0C + +/* QDMA Flow Control Register */ +#define MTK_QDMA_FC_THRES 0x1A10 +#define 
FC_THRES_DROP_MODE BIT(20) +#define FC_THRES_DROP_EN (7 << 16) +#define FC_THRES_MIN 0x4444 + +/* QDMA Interrupt Status Register */ +#define MTK_QMTK_INT_STATUS 0x1A18 +#define MTK_RX_DONE_INT1 BIT(17) +#define MTK_RX_DONE_INT0 BIT(16) +#define MTK_TX_DONE_INT3 BIT(3) +#define MTK_TX_DONE_INT2 BIT(2) +#define MTK_TX_DONE_INT1 BIT(1) +#define MTK_TX_DONE_INT0 BIT(0) +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1) +#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ + MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) + +/* QDMA Interrupt Status Register */ +#define MTK_QDMA_INT_MASK 0x1A1C + +/* QDMA Interrupt Mask Register */ +#define MTK_QDMA_HRED2 0x1A44 + +/* QDMA TX Forward CPU Pointer Register */ +#define MTK_QTX_CTX_PTR 0x1B00 + +/* QDMA TX Forward DMA Pointer Register */ +#define MTK_QTX_DTX_PTR 0x1B04 + +/* QDMA TX Release CPU Pointer Register */ +#define MTK_QTX_CRX_PTR 0x1B10 + +/* QDMA TX Release DMA Pointer Register */ +#define MTK_QTX_DRX_PTR 0x1B14 + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_HEAD 0x1B20 + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_TAIL 0x1B24 + +/* QDMA FQ Free Page Counter Register */ +#define MTK_QDMA_FQ_CNT 0x1B28 + +/* QDMA FQ Free Page Buffer Length Register */ +#define MTK_QDMA_FQ_BLEN 0x1B2C + +/* GMA1 Received Good Byte Count Register */ +#define MTK_GDM1_TX_GBCNT 0x2400 +#define MTK_STAT_OFFSET 0x40 + +/* QDMA descriptor txd4 */ +#define TX_DMA_CHKSUM (0x7 << 29) +#define TX_DMA_TSO BIT(28) +#define TX_DMA_FPORT_SHIFT 25 +#define TX_DMA_FPORT_MASK 0x7 +#define TX_DMA_INS_VLAN BIT(16) + +/* QDMA descriptor txd3 */ +#define TX_DMA_OWNER_CPU BIT(31) +#define TX_DMA_LS0 BIT(30) +#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16) +#define TX_DMA_SWC BIT(14) +#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16) + +/* QDMA descriptor rxd2 */ +#define RX_DMA_DONE BIT(31) +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) + +/* QDMA descriptor rxd3 */ +#define RX_DMA_VID(_x) ((_x) & 0xfff) + +/* QDMA descriptor rxd4 */ +#define RX_DMA_L4_VALID BIT(24) +#define RX_DMA_FPORT_SHIFT 19 +#define RX_DMA_FPORT_MASK 0x7 + +/* PHY Indirect Access Control registers */ +#define MTK_PHY_IAC 0x10004 +#define PHY_IAC_ACCESS BIT(31) +#define PHY_IAC_READ BIT(19) +#define PHY_IAC_WRITE BIT(18) +#define PHY_IAC_START BIT(16) +#define PHY_IAC_ADDR_SHIFT 20 +#define PHY_IAC_REG_SHIFT 25 +#define PHY_IAC_TIMEOUT HZ + +/* Mac control registers */ +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) +#define MAC_MCR_MAX_RX_1536 BIT(24) +#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) +#define MAC_MCR_FORCE_MODE BIT(15) +#define MAC_MCR_TX_EN BIT(14) +#define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_BACKOFF_EN BIT(9) +#define MAC_MCR_BACKPR_EN BIT(8) +#define MAC_MCR_FORCE_RX_FC BIT(5) +#define MAC_MCR_FORCE_TX_FC BIT(4) +#define MAC_MCR_SPEED_1000 BIT(3) +#define MAC_MCR_SPEED_100 BIT(2) +#define MAC_MCR_FORCE_DPX BIT(1) +#define MAC_MCR_FORCE_LINK BIT(0) +#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \ + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \ + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \ + MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \ + MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \ + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK) + +/* GPIO port control registers for GMAC 2*/ +#define GPIO_OD33_CTRL8 0x4c0 +#define GPIO_BIAS_CTRL 0xed0 +#define GPIO_DRV_SEL10 0xf00 + +/* ethernet subsystem config register */ +#define ETHSYS_SYSCFG0 0x14 +#define SYSCFG0_GE_MASK 0x3 +#define 
SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) + +struct mtk_rx_dma { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; +} __packed __aligned(4); + +struct mtk_tx_dma { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; +} __packed __aligned(4); + +struct mtk_eth; +struct mtk_mac; + +/* struct mtk_hw_stats - the structure that holds the traffic statistics. + * @stats_lock: make sure that stats operations are atomic + * @reg_offset: the status register offset of the SoC + * @syncp: the refcount + * + * All of the supported SoCs have hardware counters for traffic statistics. + * Whenever the status IRQ triggers we can read the latest stats from these + * counters and store them in this struct. + */ +struct mtk_hw_stats { + u64 tx_bytes; + u64 tx_packets; + u64 tx_skip; + u64 tx_collisions; + u64 rx_bytes; + u64 rx_packets; + u64 rx_overflow; + u64 rx_fcs_errors; + u64 rx_short_errors; + u64 rx_long_errors; + u64 rx_checksum_errors; + u64 rx_flow_control_packets; + + spinlock_t stats_lock; + u32 reg_offset; + struct u64_stats_sync syncp; +}; + +/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how + * memory was allocated so that it can be freed properly + */ +enum mtk_tx_flags { + MTK_TX_FLAGS_SINGLE0 = 0x01, + MTK_TX_FLAGS_PAGE0 = 0x02, +}; + +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at + * by the TX descriptor s + * @skb: The SKB pointer of the packet being sent + * @dma_addr0: The base addr of the first segment + * @dma_len0: The length of the first segment + * @dma_addr1: The base addr of the second segment + * @dma_len1: The length of the second segment + */ +struct mtk_tx_buf { + struct sk_buff *skb; + u32 flags; + DEFINE_DMA_UNMAP_ADDR(dma_addr0); + DEFINE_DMA_UNMAP_LEN(dma_len0); + DEFINE_DMA_UNMAP_ADDR(dma_addr1); + DEFINE_DMA_UNMAP_LEN(dma_len1); +}; + +/* struct mtk_tx_ring - This struct holds info describing a TX ring + * @dma: The descriptor ring + * @buf: The memory pointed at by the ring + * @phys: The physical addr of tx_buf + * @next_free: Pointer to the next free descriptor + * @last_free: Pointer to the last free descriptor + * @thresh: The threshold of minimum amount of free descriptors + * @free_count: QDMA uses a linked list. 
Track how many free descriptors + * are present + */ +struct mtk_tx_ring { + struct mtk_tx_dma *dma; + struct mtk_tx_buf *buf; + dma_addr_t phys; + struct mtk_tx_dma *next_free; + struct mtk_tx_dma *last_free; + u16 thresh; + atomic_t free_count; +}; + +/* struct mtk_rx_ring - This struct holds info describing a RX ring + * @dma: The descriptor ring + * @data: The memory pointed at by the ring + * @phys: The physical addr of rx_buf + * @frag_size: How big can each fragment be + * @buf_size: The size of each packet buffer + * @calc_idx: The current head of ring + */ +struct mtk_rx_ring { + struct mtk_rx_dma *dma; + u8 **data; + dma_addr_t phys; + u16 frag_size; + u16 buf_size; + u16 calc_idx; +}; + +/* currently no SoC has more than 2 macs */ +#define MTK_MAX_DEVS 2 + +/* struct mtk_eth - This is the main datasructure for holding the state + * of the driver + * @dev: The device pointer + * @base: The mapped register i/o base + * @page_lock: Make sure that register operations are atomic + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a + * dummy for NAPI to work + * @netdev: The netdev instances + * @mac: Each netdev is linked to a physical MAC + * @irq: The IRQ that we are using + * @msg_enable: Ethtool msg level + * @ethsys: The register map pointing at the range used to setup + * MII modes + * @pctl: The register map pointing at the range used to setup + * GMAC port drive/slew values + * @dma_refcnt: track how many netdevs are using the DMA engine + * @tx_ring: Pointer to the memore holding info about the TX ring + * @rx_ring: Pointer to the memore holding info about the RX ring + * @rx_napi: The NAPI struct + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring + * @scratch_head: The scratch memory that scratch_ring points to. + * @clk_ethif: The ethif clock + * @clk_esw: The switch clock + * @clk_gp1: The gmac1 clock + * @clk_gp2: The gmac2 clock + * @mii_bus: If there is a bus we need to create an instance for it + */ + +struct mtk_eth { + struct device *dev; + void __iomem *base; + struct reset_control *rstc; + spinlock_t page_lock; + struct net_device dummy_dev; + struct net_device *netdev[MTK_MAX_DEVS]; + struct mtk_mac *mac[MTK_MAX_DEVS]; + int irq; + u32 msg_enable; + unsigned long sysclk; + struct regmap *ethsys; + struct regmap *pctl; + atomic_t dma_refcnt; + struct mtk_tx_ring tx_ring; + struct mtk_rx_ring rx_ring; + struct napi_struct rx_napi; + struct mtk_tx_dma *scratch_ring; + void *scratch_head; + struct clk *clk_ethif; + struct clk *clk_esw; + struct clk *clk_gp1; + struct clk *clk_gp2; + struct mii_bus *mii_bus; +}; + +/* struct mtk_mac - the structure that holds the info about the MACs of the + * SoC + * @id: The number of the MAC + * @of_node: Our devicetree node + * @hw: Backpointer to our main datastruture + * @hw_stats: Packet statistics counter + * @phy_dev: The attached PHY if available + * @pending_work: The workqueue used to reset the dma ring + */ +struct mtk_mac { + int id; + struct device_node *of_node; + struct mtk_eth *hw; + struct mtk_hw_stats *hw_stats; + struct phy_device *phy_dev; + struct work_struct pending_work; +}; + +/* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ +extern const struct of_device_id of_mtk_match[]; + +/* read the hardware status register */ +void mtk_stats_update_mac(struct mtk_mac *mac); + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); +u32 mtk_r32(struct mtk_eth *eth, unsigned reg); + +#endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 1486ce902a56..9ca3734ebb6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -4,6 +4,7 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" + depends on MAY_USE_DEVLINK depends on PCI select MLX4_CORE select PTP_1588_CLOCK diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 715de8affcc9..c7e939945259 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) err = mlx4_reset_slave(dev); else err = mlx4_reset_master(dev); - BUG_ON(err != 0); + if (!err) { + mlx4_err(dev, "device was reset successfully\n"); + } else { + /* EEH could have disabled the PCI channel during reset. That's + * recoverable and the PCI error flow will handle it. + */ + if (!pci_channel_offline(dev->persist->pdev)) + BUG_ON(1); + } dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; - mlx4_err(dev, "device was reset successfully\n"); mutex_unlock(&persist->device_state_mutex); /* At that step HW was already reset, now notify clients */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 3348e646db70..a849da92f857 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (timestamp_en) cq_context->flags |= cpu_to_be32(1 << 19); - cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); + cq_context->logsize_usrpage = + cpu_to_be32((ilog2(nent) << 24) | + mlx4_to_hw_uar_index(dev, uar->index)); cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 038f9ce391e6..1494997c4f7e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .enable = mlx4_en_phc_enable, }; +#define MLX4_EN_WRAP_AROUND_SEC 10ULL + +/* This function calculates the max shift that enables the user range + * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register. + */ +static u32 freq_to_shift(u16 freq) +{ + u32 freq_khz = freq * 1000; + u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; + u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? 
+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; + /* calculate max possible multiplier in order to fit in 64bit */ + u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); + + /* This comes from the reverse of clocksource_khz2mult */ + return ilog2(div_u64(max_mul * freq_khz, 1000000)); +} + void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; @@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) memset(&mdev->cycles, 0, sizeof(mdev->cycles)); mdev->cycles.read = mlx4_en_read_clock; mdev->cycles.mask = CLOCKSOURCE_MASK(48); - /* Using shift to make calculation more accurate. Since current HW - * clock frequency is 427 MHz, and cycles are given using a 48 bits - * register, the biggest shift when calculating using u64, is 14 - * (max_cycles * multiplier < 2^64) - */ - mdev->cycles.shift = 14; + mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock); mdev->cycles.mult = clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); mdev->nominal_c_mult = mdev->cycles.mult; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index dd84cabb2a51..f69584a9b47f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -501,34 +501,30 @@ static u32 mlx4_en_autoneg_get(struct net_device *dev) return autoneg; } -static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +static void ptys2ethtool_update_supported_port(unsigned long *mask, + struct mlx4_ptys_reg *ptys_reg) { u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) | MLX4_PROT_MASK(MLX4_1000BASE_T) | MLX4_PROT_MASK(MLX4_100BASE_TX))) { - return SUPPORTED_TP; - } - - if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); + } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) | MLX4_PROT_MASK(MLX4_10GBASE_SR) | MLX4_PROT_MASK(MLX4_56GBASE_SR4) | MLX4_PROT_MASK(MLX4_40GBASE_CR4) | MLX4_PROT_MASK(MLX4_40GBASE_SR4) | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; - } - - if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); + } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) | MLX4_PROT_MASK(MLX4_40GBASE_KR4) | MLX4_PROT_MASK(MLX4_20GBASE_KR2) | MLX4_PROT_MASK(MLX4_10GBASE_KR) | MLX4_PROT_MASK(MLX4_10GBASE_KX4) | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { - return SUPPORTED_Backplane; + __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask); } - return 0; } static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) @@ -574,122 +570,111 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) enum ethtool_report { SUPPORTED = 0, ADVERTISED = 1, - SPEED = 2 }; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); + u32 speed; +}; + +static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg, + enum ethtool_report report) +{ + switch (report) { + case SUPPORTED: + return cfg->supported; + case ADVERTISED: + return cfg->advertised; + } + return NULL; +} + +#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) 
\ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_map[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ -static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { - [MLX4_100BASE_TX] = { - SUPPORTED_100baseT_Full, - ADVERTISED_100baseT_Full, - SPEED_100 - }, - - [MLX4_1000BASE_T] = { - SUPPORTED_1000baseT_Full, - ADVERTISED_1000baseT_Full, - SPEED_1000 - }, - [MLX4_1000BASE_CX_SGMII] = { - SUPPORTED_1000baseKX_Full, - ADVERTISED_1000baseKX_Full, - SPEED_1000 - }, - [MLX4_1000BASE_KX] = { - SUPPORTED_1000baseKX_Full, - ADVERTISED_1000baseKX_Full, - SPEED_1000 - }, - - [MLX4_10GBASE_T] = { - SUPPORTED_10000baseT_Full, - ADVERTISED_10000baseT_Full, - SPEED_10000 - }, - [MLX4_10GBASE_CX4] = { - SUPPORTED_10000baseKX4_Full, - ADVERTISED_10000baseKX4_Full, - SPEED_10000 - }, - [MLX4_10GBASE_KX4] = { - SUPPORTED_10000baseKX4_Full, - ADVERTISED_10000baseKX4_Full, - SPEED_10000 - }, - [MLX4_10GBASE_KR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - [MLX4_10GBASE_CR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - [MLX4_10GBASE_SR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - - [MLX4_20GBASE_KR2] = { - SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, - ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, - SPEED_20000 - }, - - [MLX4_40GBASE_CR4] = { - SUPPORTED_40000baseCR4_Full, - ADVERTISED_40000baseCR4_Full, - SPEED_40000 - }, - [MLX4_40GBASE_KR4] = { - SUPPORTED_40000baseKR4_Full, - ADVERTISED_40000baseKR4_Full, - SPEED_40000 - }, - [MLX4_40GBASE_SR4] = { - SUPPORTED_40000baseSR4_Full, - ADVERTISED_40000baseSR4_Full, - SPEED_40000 - }, - - [MLX4_56GBASE_KR4] = { - SUPPORTED_56000baseKR4_Full, - ADVERTISED_56000baseKR4_Full, - SPEED_56000 - }, - [MLX4_56GBASE_CR4] = { - SUPPORTED_56000baseCR4_Full, - ADVERTISED_56000baseCR4_Full, - SPEED_56000 - }, - [MLX4_56GBASE_SR4] = { - SUPPORTED_56000baseSR4_Full, - ADVERTISED_56000baseSR4_Full, - SPEED_56000 - }, +static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ]; + +void __init mlx4_en_init_ptys2ethtool_map(void) +{ + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100, + ETHTOOL_LINK_MODE_100baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + 
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT); }; -static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +static void ptys2ethtool_update_link_modes(unsigned long *link_modes, + u32 eth_proto, + enum ethtool_report report) { int i; - u32 link_modes = 0; - for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { if (eth_proto & MLX4_PROT_MASK(i)) - link_modes |= ptys2ethtool_map[i][report]; + bitmap_or(link_modes, link_modes, + ptys2ethtool_link_mode(&ptys2ethtool_map[i], + report), + __ETHTOOL_LINK_MODE_MASK_NBITS); } - return link_modes; } -static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +static u32 ethtool2ptys_link_modes(const unsigned long *link_modes, + enum ethtool_report report) { int i; u32 ptys_modes = 0; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (ptys2ethtool_map[i][report] & link_modes) + if (bitmap_intersects( + ptys2ethtool_link_mode(&ptys2ethtool_map[i], + report), + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= 1 << i; } return ptys_modes; @@ -702,14 +687,15 @@ static u32 speed2ptys_link_modes(u32 speed) u32 ptys_modes = 0; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (ptys2ethtool_map[i][SPEED] == speed) + if (ptys2ethtool_map[i].speed == speed) ptys_modes |= 1 << i; } return ptys_modes; } -static int ethtool_get_ptys_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +ethtool_get_ptys_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; @@ -737,79 +723,102 @@ static int ethtool_get_ptys_settings(struct net_device *dev, en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", be32_to_cpu(ptys_reg.eth_proto_lp_adv)); - cmd->supported = 0; - cmd->advertising = 0; + /* reset supported/advertising masks */ + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - cmd->supported |= ptys_get_supported_port(&ptys_reg); + ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported, + &ptys_reg); eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); - cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported, + eth_proto, SUPPORTED); eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); - cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising, + eth_proto, ADVERTISED); - cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - cmd->advertising |= (priv->prof->tx_pause) ? 
ADVERTISED_Pause : 0; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Asym_Pause); - cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? - ADVERTISED_Asym_Pause : 0; + if (priv->prof->tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Pause); + if (priv->prof->tx_pause ^ priv->prof->rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Asym_Pause); - cmd->port = ptys_get_active_port(&ptys_reg); - cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? - XCVR_EXTERNAL : XCVR_INTERNAL; + link_ksettings->base.port = ptys_get_active_port(&ptys_reg); if (mlx4_en_autoneg_get(dev)) { - cmd->supported |= SUPPORTED_Autoneg; - cmd->advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); } - cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + link_ksettings->base.autoneg + = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? AUTONEG_ENABLE : AUTONEG_DISABLE; eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); - cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); - cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? - ADVERTISED_Autoneg : 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + ptys2ethtool_update_link_modes( + link_ksettings->link_modes.lp_advertising, + eth_proto, ADVERTISED); + if (priv->port_state.flags & MLX4_EN_PORT_ANC) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg); - cmd->phy_address = 0; - cmd->mdio_support = 0; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; - cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + link_ksettings->base.phy_address = 0; + link_ksettings->base.mdio_support = 0; + link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; return ret; } -static void ethtool_get_default_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static void +ethtool_get_default_link_ksettings( + struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); int trans_type; - cmd->autoneg = AUTONEG_DISABLE; - cmd->supported = SUPPORTED_10000baseT_Full; - cmd->advertising = ADVERTISED_10000baseT_Full; - trans_type = priv->port_state.transceiver; + link_ksettings->base.autoneg = AUTONEG_DISABLE; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + 10000baseT_Full); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, + 10000baseT_Full); + + trans_type = priv->port_state.transceiver; if (trans_type > 0 && trans_type <= 0xC) { - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_EXTERNAL; - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + link_ksettings->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, FIBRE); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, FIBRE); } else if (trans_type == 0x80 || trans_type == 0) { - cmd->port = PORT_TP; - cmd->transceiver = XCVR_INTERNAL; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + link_ksettings->base.port = PORT_TP; + 
ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, TP); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, TP); } else { - cmd->port = -1; - cmd->transceiver = -1; + link_ksettings->base.port = -1; } } -static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int +mlx4_en_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); int ret = -EINVAL; @@ -822,16 +831,16 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) priv->port_state.flags & MLX4_EN_PORT_ANE); if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) - ret = ethtool_get_ptys_settings(dev, cmd); + ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings); if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ - ethtool_get_default_settings(dev, cmd); + ethtool_get_default_link_ksettings(dev, link_ksettings); if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); - cmd->duplex = DUPLEX_FULL; + link_ksettings->base.speed = priv->port_state.link_speed; + link_ksettings->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + link_ksettings->base.speed = SPEED_UNKNOWN; + link_ksettings->base.duplex = DUPLEX_UNKNOWN; } return 0; } @@ -855,21 +864,29 @@ static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, return proto_admin; } -static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int +mlx4_en_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; int ret; - u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); - int speed = ethtool_cmd_speed(cmd); + u32 ptys_adv = ethtool2ptys_link_modes( + link_ksettings->link_modes.advertising, ADVERTISED); + const int speed = link_ksettings->base.speed; - en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", - speed, cmd->advertising, cmd->autoneg, cmd->duplex); + en_dbg(DRV, priv, + "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n", + speed, __ETHTOOL_LINK_MODE_MASK_NBITS, + link_ksettings->link_modes.advertising, + link_ksettings->base.autoneg, + link_ksettings->base.duplex); - if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || - (cmd->duplex == DUPLEX_HALF)) + if (!(priv->mdev->dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (link_ksettings->base.duplex == DUPLEX_HALF)) return -EINVAL; memset(&ptys_reg, 0, sizeof(ptys_reg)); @@ -883,7 +900,7 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return 0; } - proto_admin = cmd->autoneg == AUTONEG_ENABLE ? + proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
cpu_to_be32(ptys_adv) : speed_set_ptys_admin(priv, speed, ptys_reg.eth_proto_cap); @@ -1982,8 +1999,8 @@ static int mlx4_en_set_phys_id(struct net_device *dev, const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, - .get_settings = mlx4_en_get_settings, - .set_settings = mlx4_en_set_settings, + .get_link_ksettings = mlx4_en_get_link_ksettings, + .set_link_ksettings = mlx4_en_set_link_ksettings, .get_link = ethtool_op_get_link, .get_strings = mlx4_en_get_strings, .get_sset_count = mlx4_en_get_sset_count, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index e0ec280a7fa1..bf7628db098a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -382,6 +382,7 @@ static void mlx4_en_verify_params(void) static int __init mlx4_en_init(void) { mlx4_en_verify_params(); + mlx4_en_init_ptys2ethtool_map(); return mlx4_register_interface(&mlx4_en_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0c7e3f69a73b..b4b258c8ca47 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -40,6 +40,7 @@ #include <net/ip.h> #include <net/busy_poll.h> #include <net/vxlan.h> +#include <net/devlink.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> @@ -69,6 +70,15 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } +static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return mlx4_en_setup_tc(dev, tc->tc); +} + #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -2024,8 +2034,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev) en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); /* Unregister device - this will close the port if it was up */ - if (priv->registered) + if (priv->registered) { + devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, + priv->port)); unregister_netdev(dev); + } if (priv->allocated) mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); @@ -2245,7 +2258,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) struct mlx4_en_dev *mdev = en_priv->mdev; u64 mac_u64 = mlx4_mac_to_u64(mac); - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) return -EINVAL; return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); @@ -2344,8 +2357,6 @@ out: /* set offloads */ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; - priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2356,8 +2367,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) /* unset offloads */ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); - priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; - priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2466,7 +2475,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = 
mlx4_en_filter_rfs, #endif @@ -2504,7 +2513,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif @@ -2980,6 +2989,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->rss_hash_fn = ETH_RSS_HASH_TOP; } + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + mdev->pndev[port] = dev; mdev->upper[port] = NULL; @@ -3041,6 +3055,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } priv->registered = 1; + devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), + dev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index ee99e67187f5..3904b5fc0b7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->collisions = 0; stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); - stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_over_errors = 0; stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); stats->rx_frame_errors = 0; stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); - stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_missed_errors = 0; stats->tx_aborted_errors = 0; stats->tx_carrier_errors = 0; stats->tx_fifo_errors = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 12aab5a659d3..02e925d6f734 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, } else { context->sq_size_stride = ilog2(TXBB_SIZE) - 4; } - context->usr_page = cpu_to_be32(mdev->priv_uar.index); + context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, + mdev->priv_uar.index)); context->local_qpn = cpu_to_be32(qpn); context->pri_path.ackto = 1 & 0x07; context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 41440b2b20a3..86bcfe510e4e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -82,8 +82,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, /* Not doing get_page() for each frag is a big win * on asymetric workloads. Note we can not use atomic_set(). 
*/ - atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, - &page->_count); + page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1); return 0; } @@ -127,7 +126,7 @@ out: dma_unmap_page(priv->ddev, page_alloc[i].dma, page_alloc[i].page_size, PCI_DMA_FROMDEVICE); page = page_alloc[i].page; - atomic_set(&page->_count, 1); + set_page_count(page, 1); put_page(page); } } @@ -165,7 +164,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n", i, ring->page_alloc[i].page_size, - atomic_read(&ring->page_alloc[i].page->_count)); + page_ref_count(ring->page_alloc[i].page)); } return 0; @@ -177,7 +176,7 @@ out: dma_unmap_page(priv->ddev, page_alloc->dma, page_alloc->page_size, PCI_DMA_FROMDEVICE); page = page_alloc->page; - atomic_set(&page->_count, 1); + set_page_count(page, 1); put_page(page); page_alloc->page = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4421bf5463f6..c0d7b7296236 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); if (ring->bf_alloced) - ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + ring->context.usr_page = + cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, + ring->bf.uar->index)); err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); @@ -274,7 +276,8 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int index, u8 owner, u64 timestamp) + int index, u8 owner, u64 timestamp, + int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; @@ -345,7 +348,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, } } } - dev_consume_skb_any(skb); + napi_consume_skb(skb, napi_mode); + return tx_info->nr_txbb; } @@ -369,7 +373,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) while (ring->cons != ring->prod) { ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, ring->cons & ring->size_mask, - !!(ring->cons & ring->size), 0); + !!(ring->cons & ring->size), 0, + 0 /* Non-NAPI caller */); ring->cons += ring->last_nr_txbb; cnt++; } @@ -383,7 +388,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) } static bool mlx4_en_process_tx_cq(struct net_device *dev, - struct mlx4_en_cq *cq) + struct mlx4_en_cq *cq, int napi_budget) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; @@ -449,7 +454,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index, !!((ring_cons + txbbs_skipped) & - ring->size), timestamp); + ring->size), timestamp, napi_budget); mlx4_en_stamp_wqe(priv, ring, stamp_index, !!((ring_cons + txbbs_stamp) & @@ -509,7 +514,7 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = netdev_priv(dev); int clean_complete; - clean_complete = mlx4_en_process_tx_cq(dev, cq); + clean_complete = mlx4_en_process_tx_cq(dev, cq, budget); if (!clean_complete) return budget; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 
4696053165f8..f613977455e0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) if (!priv->eq_table.uar_map[index]) { priv->eq_table.uar_map[index] = - ioremap(pci_resource_start(dev->persist->pdev, 2) + - ((eq->eqn / 4) << PAGE_SHIFT), - PAGE_SIZE); + ioremap( + pci_resource_start(dev->persist->pdev, 2) + + ((eq->eqn / 4) << (dev->uar_page_shift)), + (1 << (dev->uar_page_shift))); if (!priv->eq_table.uar_map[index]) { mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", eq->eqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index d66c690a8597..e97094598b2d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -157,7 +157,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [29] = "802.1ad offload support", [31] = "Modifying loopback source checks using UPDATE_QP support", [32] = "Loopback source checks support", - [33] = "RoCEv2 support" + [33] = "RoCEv2 support", + [34] = "DMFS Sniffer support (UC & MC)" }; int i; @@ -810,6 +811,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER; MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 0472941af820..dec77d6f0ac9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/errno.h> +#include <net/devlink.h> #include "mlx4.h" @@ -249,3 +250,11 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int return result; } EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); + +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + + return &info->devlink_port; +} +EXPORT_SYMBOL_GPL(mlx4_get_devlink_port); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index f1b6d219e445..358f7230da58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -42,6 +42,7 @@ #include <linux/io-mapping.h> #include <linux/delay.h> #include <linux/kmod.h> +#include <net/devlink.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -104,6 +105,11 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); +static bool enable_4k_uar; +module_param(enable_4k_uar, bool, 0444); +MODULE_PARM_DESC(enable_4k_uar, + "Enable using 4K UAR. 
Should not be enabled if have VFs which do not support 4K UARs (default: false)"); + #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ MLX4_FUNC_CAP_DMFS_A0_STATIC) @@ -168,6 +174,20 @@ struct mlx4_port_config { static atomic_t pf_loading = ATOMIC_INIT(0); +static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + /* The reserved_uars is calculated by system page size unit. + * Therefore, adjustment is added when the uar page size is less + * than the system page size + */ + dev->caps.reserved_uars = + max_t(int, + mlx4_get_num_reserved_uar(dev), + dev_cap->reserved_uars / + (1 << (PAGE_SHIFT - dev->uar_page_shift))); +} + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -386,8 +406,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_mtts = dev_cap->reserved_mtts; dev->caps.reserved_mrws = dev_cap->reserved_mrws; - /* The first 128 UARs are used for EQ doorbells */ - dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->reserved_xrcds : 0; @@ -405,6 +423,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + /* Save uar page shift */ + if (!mlx4_is_slave(dev)) { + /* Virtual PCI function needs to determine UAR page size from + * firmware. Only master PCI function can set the uar page size + */ + if (enable_4k_uar) + dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; + else + dev->uar_page_shift = PAGE_SHIFT; + + mlx4_set_num_reserved_uars(dev, dev_cap); + } + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { struct mlx4_init_hca_param hca_param; @@ -815,16 +846,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } - /* slave gets uar page size from QUERY_HCA fw command */ - dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); + /* Set uar_page_shift for VF */ + dev->uar_page_shift = hca_param.uar_page_sz + 12; - /* TODO: relax this assumption */ - if (dev->caps.uar_page_size != PAGE_SIZE) { - mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", - dev->caps.uar_page_size, PAGE_SIZE); - return -ENODEV; + /* Make sure the master uar page size is valid */ + if (dev->uar_page_shift > PAGE_SHIFT) { + mlx4_err(dev, + "Invalid configuration: uar page size is larger than system page size\n"); + return -ENODEV; } + /* Set reserved_uars based on the uar_page_shift */ + mlx4_set_num_reserved_uars(dev, &dev_cap); + + /* Although uar page size in FW differs from system page size, + * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) + * still works with assumption that uar page size == system page size + */ + dev->caps.uar_page_size = PAGE_SIZE; + memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { @@ -1051,36 +1091,20 @@ static ssize_t show_port_type(struct device *dev, return strlen(buf); } -static ssize_t set_port_type(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int __set_port_type(struct mlx4_port_info *info, + enum mlx4_port_type port_type) { - struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, - port_attr); struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type 
types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; - static DEFINE_MUTEX(set_port_type_mutex); int i; int err = 0; - mutex_lock(&set_port_type_mutex); - - if (!strcmp(buf, "ib\n")) - info->tmp_type = MLX4_PORT_TYPE_IB; - else if (!strcmp(buf, "eth\n")) - info->tmp_type = MLX4_PORT_TYPE_ETH; - else if (!strcmp(buf, "auto\n")) - info->tmp_type = MLX4_PORT_TYPE_AUTO; - else { - mlx4_err(mdev, "%s is not supported port type\n", buf); - err = -EINVAL; - goto err_out; - } - mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); + info->tmp_type = port_type; + /* Possible type is always the one that was delivered */ mdev->caps.possible_type[info->port] = info->tmp_type; @@ -1122,6 +1146,37 @@ static ssize_t set_port_type(struct device *dev, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); + + return err; +} + +static ssize_t set_port_type(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + enum mlx4_port_type port_type; + static DEFINE_MUTEX(set_port_type_mutex); + int err; + + mutex_lock(&set_port_type_mutex); + + if (!strcmp(buf, "ib\n")) { + port_type = MLX4_PORT_TYPE_IB; + } else if (!strcmp(buf, "eth\n")) { + port_type = MLX4_PORT_TYPE_ETH; + } else if (!strcmp(buf, "auto\n")) { + port_type = MLX4_PORT_TYPE_AUTO; + } else { + mlx4_err(mdev, "%s is not supported port type\n", buf); + err = -EINVAL; + goto err_out; + } + + err = __set_port_type(info, port_type); + err_out: mutex_unlock(&set_port_type_mutex); @@ -1226,6 +1281,7 @@ err_set_port: static int mlx4_mf_bond(struct mlx4_dev *dev) { int err = 0; + int nvfs; struct mlx4_slaves_pport slaves_port1; struct mlx4_slaves_pport slaves_port2; DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); @@ -1242,11 +1298,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev) return -EINVAL; } + /* number of virtual functions is number of total functions minus one + * physical function for each port. 
+ */ + nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + + bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; + /* limit on maximum allowed VFs */ - if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + - bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > - MAX_MF_BOND_ALLOWED_SLAVES) + if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) { + mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", + nvfs, MAX_MF_BOND_ALLOWED_SLAVES); return -EINVAL; + } if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); @@ -2179,8 +2242,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + if (enable_4k_uar) { + init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; + init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + } else { + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + init_hca.uar_page_sz = PAGE_SHIFT - 12; + } + init_hca.mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) @@ -2847,8 +2917,13 @@ no_msi: static int mlx4_init_port_info(struct mlx4_dev *dev, int port) { + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - int err = 0; + int err; + + err = devlink_port_register(devlink, &info->devlink_port, port); + if (err) + return err; info->dev = dev; info->port = port; @@ -2873,6 +2948,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); if (err) { mlx4_err(dev, "Failed to create file for port %d\n", port); + devlink_port_unregister(&info->devlink_port); info->port = -1; } @@ -3644,23 +3720,54 @@ err_disable_pdev: return err; } +static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, + enum devlink_port_type port_type) +{ + struct mlx4_port_info *info = container_of(devlink_port, + struct mlx4_port_info, + devlink_port); + enum mlx4_port_type mlx4_port_type; + + switch (port_type) { + case DEVLINK_PORT_TYPE_AUTO: + mlx4_port_type = MLX4_PORT_TYPE_AUTO; + break; + case DEVLINK_PORT_TYPE_ETH: + mlx4_port_type = MLX4_PORT_TYPE_ETH; + break; + case DEVLINK_PORT_TYPE_IB: + mlx4_port_type = MLX4_PORT_TYPE_IB; + break; + default: + return -EOPNOTSUPP; + } + + return __set_port_type(info, mlx4_port_type); +} + +static const struct devlink_ops mlx4_devlink_ops = { + .port_type_set = mlx4_devlink_port_type_set, +}; + static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + struct devlink *devlink; struct mlx4_priv *priv; struct mlx4_dev *dev; int ret; printk_once(KERN_INFO "%s", mlx4_version); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) + devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); + if (!devlink) return -ENOMEM; + priv = devlink_priv(devlink); dev = &priv->dev; dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); if (!dev->persist) { - kfree(priv); - return -ENOMEM; + ret = -ENOMEM; + goto err_devlink_free; } dev->persist->pdev = pdev; dev->persist->dev = dev; @@ -3669,14 +3776,23 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->device_state_mutex); mutex_init(&dev->persist->interface_state_mutex); + ret 
= devlink_register(devlink, &pdev->dev); + if (ret) + goto err_persist_free; + ret = __mlx4_init_one(pdev, id->driver_data, priv); - if (ret) { - kfree(dev->persist); - kfree(priv); - } else { - pci_save_state(pdev); - } + if (ret) + goto err_devlink_unregister; + pci_save_state(pdev); + return 0; + +err_devlink_unregister: + devlink_unregister(devlink); +err_persist_free: + kfree(dev->persist); +err_devlink_free: + devlink_free(devlink); return ret; } @@ -3777,6 +3893,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); + struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; mutex_lock(&persist->interface_state_mutex); @@ -3807,8 +3924,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); + devlink_unregister(devlink); kfree(dev->persist); - kfree(priv); + devlink_free(devlink); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1d4e2e054647..42d8de892bfe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -752,8 +752,10 @@ static const u8 __promisc_mode[] = { [MLX4_FS_REGULAR] = 0x0, [MLX4_FS_ALL_DEFAULT] = 0x1, [MLX4_FS_MC_DEFAULT] = 0x3, - [MLX4_FS_UC_SNIFFER] = 0x4, - [MLX4_FS_MC_SNIFFER] = 0x5, + [MLX4_FS_MIRROR_RX_PORT] = 0x4, + [MLX4_FS_MIRROR_SX_PORT] = 0x5, + [MLX4_FS_UC_SNIFFER] = 0x6, + [MLX4_FS_MC_SNIFFER] = 0x7, }; int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 7baef52db6b7..ef9683101ead 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -45,6 +45,7 @@ #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <net/devlink.h> #include <linux/mlx4/device.h> #include <linux/mlx4/driver.h> @@ -828,6 +829,7 @@ struct mlx4_port_info { struct mlx4_roce_gid_table gid_table; int base_qpn; struct cpu_rmap *rmap; + struct devlink_port devlink_port; }; struct mlx4_sense { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 35de7d2e6b34..d12ab6a73344 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -607,6 +607,7 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) +void mlx4_en_init_ptys2ethtool_map(void); void mlx4_en_update_loopback_state(struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 609c59dc854e..b3cc3ab63799 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free); int mlx4_init_uar_table(struct mlx4_dev *dev) { - if (dev->caps.num_uars <= 128) { - mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", - dev->caps.num_uars); + int num_reserved_uar = mlx4_get_num_reserved_uar(dev); + + mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift); + mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars); + + if (dev->caps.num_uars <= num_reserved_uar) { + mlx4_err( + dev, "Only %d UAR pages (need more than %d)\n", + dev->caps.num_uars, 
num_reserved_uar); mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 787b7bb54d52..211c65087997 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index b46dbe29ef6c..cd9b2b28df88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, counter_index, RES_COUNTER); - if (!r || r->owner != slave) + if (!r || r->owner != slave) { ret = -EINVAL; - counter = container_of(r, struct res_counter, com); - if (!counter->port) - counter->port = port; + } else { + counter = container_of(r, struct res_counter, com); + if (!counter->port) + counter->port = port; + } spin_unlock_irq(mlx4_tlock(dev)); return ret; @@ -3139,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, case QP_TRANS_RTS2RTS: case QP_TRANS_SQD2SQD: case QP_TRANS_SQD2RTS: - if (slave != mlx4_master_func_num(dev)) + if (slave != mlx4_master_func_num(dev)) { if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; if (dev->caps.port_mask[port] != 
MLX4_PORT_TYPE_IB) @@ -3158,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, if (qp_ctx->alt_path.mgid_index >= num_gids) return -EINVAL; } + } break; default: break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index c503ea05e742..1cf722eba607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -19,3 +19,15 @@ config MLX5_CORE_EN Ethernet support in Mellanox Technologies ConnectX-4 NIC. Ethernet and Infiniband support in ConnectX-4 are currently mutually exclusive. + +config MLX5_CORE_EN_DCB + bool "Data Center Bridging (DCB) Support" + default y + depends on MLX5_CORE_EN && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + If set to N, will not be able to configure QoS and ratelimit attributes. + This flag is depended on the kernel's DCB support. + + If unsure, set to Y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 01c0256effb8..4fc45ee0c5d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -3,6 +3,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o + mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o + en_txrx.o en_clock.o vxlan.o en_tc.o + +mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 037fc4cdf5af..eb926e1ee71c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -407,6 +407,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, const char *mlx5_command_str(int command) { switch (command) { + case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + return "QUERY_HCA_VPORT_CONTEXT"; + + case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + return "MODIFY_HCA_VPORT_CONTEXT"; + case MLX5_CMD_OP_QUERY_HCA_CAP: return "QUERY_HCA_CAP"; @@ -560,6 +566,18 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ACCESS_REG: return "MLX5_CMD_OP_ACCESS_REG"; + case MLX5_CMD_OP_SET_WOL_ROL: + return "SET_WOL_ROL"; + + case MLX5_CMD_OP_QUERY_WOL_ROL: + return "QUERY_WOL_ROL"; + + case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: + return "ADD_VXLAN_UDP_DPORT"; + + case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: + return "DELETE_VXLAN_UDP_DPORT"; + default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index aac071a7e830..879e6276c473 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -29,6 +29,8 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#ifndef __MLX5_EN_H__ +#define __MLX5_EN_H__ #include <linux/if_vlan.h> #include <linux/etherdevice.h> @@ -38,8 +40,10 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/cq.h> +#include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/transobj.h> +#include <linux/rhashtable.h> #include "wq.h" #include "mlx5_core.h" @@ -69,6 +73,11 @@ #define MLX5E_NUM_MAIN_GROUPS 9 +#ifdef CONFIG_MLX5_CORE_EN_DCB +#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ +#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ +#endif + static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ "rx_packets", @@ -95,12 +104,15 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { /* SW counters */ "tso_packets", "tso_bytes", + "tso_inner_packets", + "tso_inner_bytes", "lro_packets", "lro_bytes", "rx_csum_good", "rx_csum_none", "rx_csum_sw", "tx_csum_offload", + "tx_csum_inner", "tx_queue_stopped", "tx_queue_wake", "tx_queue_dropped", @@ -133,18 +145,21 @@ struct mlx5e_vport_stats { /* SW counters */ u64 tso_packets; u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; u64 lro_packets; u64 lro_bytes; u64 rx_csum_good; u64 rx_csum_none; u64 rx_csum_sw; u64 tx_csum_offload; + u64 tx_csum_inner; u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; u64 rx_wqe_err; -#define NUM_VPORT_COUNTERS 32 +#define NUM_VPORT_COUNTERS 35 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -223,6 +238,7 @@ struct mlx5e_pport_stats { static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "csum_none", "csum_sw", "lro_packets", @@ -232,35 +248,46 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_rq_stats { u64 packets; + u64 bytes; u64 csum_none; u64 csum_sw; u64 lro_packets; u64 lro_bytes; u64 wqe_err; -#define NUM_RQ_STATS 6 +#define NUM_RQ_STATS 7 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "tso_packets", "tso_bytes", + "tso_inner_packets", + "tso_inner_bytes", + "csum_offload_inner", + "nop", "csum_offload_none", "stopped", "wake", "dropped", - "nop" }; struct mlx5e_sq_stats { + /* commonly accessed in data path */ u64 packets; + u64 bytes; u64 tso_packets; u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_offload_inner; + u64 nop; + /* less likely accessed in data path */ u64 csum_offload_none; u64 stopped; u64 wake; u64 dropped; - u64 nop; -#define NUM_SQ_STATS 8 +#define NUM_SQ_STATS 12 }; struct mlx5e_stats { @@ -272,7 +299,6 @@ struct mlx5e_params { u8 log_sq_size; u8 log_rq_size; u16 num_channels; - u8 default_vlan_prio; u8 num_tc; u16 rx_cq_moderation_usec; u16 rx_cq_moderation_pkts; @@ -285,6 +311,9 @@ struct mlx5e_params { u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; +#ifdef CONFIG_MLX5_CORE_EN_DCB + struct ieee_ets ets; +#endif }; struct mlx5e_tstamp { @@ -304,14 +333,9 @@ enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, }; -enum cq_flags { - MLX5E_CQ_HAS_CQES = 1, -}; - struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; - unsigned long flags; /* data path - accessed per napi poll */ struct napi_struct *napi; @@ -364,6 +388,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, + 
MLX5E_SQ_STATE_BF_ENABLE, }; struct mlx5e_sq { @@ -392,7 +417,6 @@ struct mlx5e_sq { struct mlx5_wq_cyc wq; u32 dma_fifo_mask; void __iomem *uar_map; - void __iomem *uar_bf_map; struct netdev_queue *txq; u32 sqn; u16 bf_buf_size; @@ -452,6 +476,8 @@ enum mlx5e_traffic_types { MLX5E_NUM_TT, }; +#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY) + enum mlx5e_rqt_ix { MLX5E_INDIRECTION_RQT, MLX5E_SINGLE_RQ_RQT, @@ -491,21 +517,33 @@ struct mlx5e_vlan_db { bool filter_disabled; }; +struct mlx5e_vxlan_db { + spinlock_t lock; /* protect vxlan table */ + struct radix_tree_root tree; +}; + struct mlx5e_flow_table { int num_groups; struct mlx5_flow_table *t; struct mlx5_flow_group **g; }; +struct mlx5e_tc_flow_table { + struct mlx5_flow_table *t; + + struct rhashtable_params ht_params; + struct rhashtable ht; +}; + struct mlx5e_flow_tables { struct mlx5_flow_namespace *ns; + struct mlx5e_tc_flow_table tc; struct mlx5e_flow_table vlan; struct mlx5e_flow_table main; }; struct mlx5e_priv { /* priv data path fields - start */ - int default_vlan_prio; struct mlx5e_sq **txq_to_sq_map; int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; /* priv data path fields - end */ @@ -515,7 +553,7 @@ struct mlx5e_priv { struct mlx5_uar cq_uar; u32 pdn; u32 tdn; - struct mlx5_core_mr mr; + struct mlx5_core_mkey mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; @@ -526,9 +564,9 @@ struct mlx5e_priv { struct mlx5e_flow_tables fts; struct mlx5e_eth_addr_db eth_addr; struct mlx5e_vlan_db vlan; + struct mlx5e_vxlan_db vxlan; struct mlx5e_params params; - spinlock_t async_events_spinlock; /* sync hw events */ struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; struct delayed_work update_stats_work; @@ -591,7 +629,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); -bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); @@ -618,9 +656,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5e_tx_wqe *wqe, int bf_sz) @@ -636,16 +677,12 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, * doorbell */ wmb(); - - if (bf_sz) { - __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz); - - /* flush the write-combining mapped buffer */ - wmb(); - - } else { + if (bf_sz) + __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz); + else mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); - } + /* flush the write-combining mapped buffer */ + wmb(); sq->bf_offset ^= sq->bf_buf_size; } @@ -665,4 +702,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) } extern const struct ethtool_ops mlx5e_ethtool_ops; +#ifdef CONFIG_MLX5_CORE_EN_DCB +extern const struct dcbnl_rtnl_ops 
mlx5e_dcbnl_ops; +int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); +#endif + u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); + +#endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index be6543570b2b..2018eebe1531 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, overflow_work); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); } @@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns = timespec64_to_ns(ts); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_init(&tstamp->clock, &tstamp->cycles, ns); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } @@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns; + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); ns = timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); *ts = ns_to_timespec64(ns); @@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_adjtime(&tstamp->clock, delta); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } @@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { u64 adj; u32 diff; + unsigned long flags; int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); @@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : tstamp->nominal_c_mult + diff; - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c new file mode 100644 index 000000000000..3036f279a8fd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/device.h> +#include <linux/netdevice.h> +#include "en.h" + +#define MLX5E_MAX_PRIORITY 8 + +#define MLX5E_100MB (100000) +#define MLX5E_1GB (1000000) + +static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!MLX5_CAP_GEN(priv->mdev, ets)) + return -ENOTSUPP; + + memcpy(ets, &priv->params.ets, sizeof(*ets)); + return 0; +} + +enum { + MLX5E_VENDOR_TC_GROUP_NUM = 7, + MLX5E_ETS_TC_GROUP_NUM = 0, +}; + +static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) +{ + bool any_tc_mapped_to_ets = false; + int strict_group; + int i; + + for (i = 0; i <= max_tc; i++) + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + any_tc_mapped_to_ets = true; + + strict_group = any_tc_mapped_to_ets ? 
1 : 0; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM; + break; + case IEEE_8021QAZ_TSA_STRICT: + tc_group[i] = strict_group++; + break; + case IEEE_8021QAZ_TSA_ETS: + tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; + break; + } + } +} + +static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, + u8 *tc_group, int max_tc) +{ + int i; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + break; + case IEEE_8021QAZ_TSA_STRICT: + tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + break; + case IEEE_8021QAZ_TSA_ETS: + tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC; + break; + } + } +} + +int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS]; + u8 tc_group[IEEE_8021QAZ_MAX_TCS]; + int max_tc = mlx5_max_tc(mdev); + int err; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + mlx5e_build_tc_group(ets, tc_group, max_tc); + mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc); + + err = mlx5_set_port_prio_tc(mdev, ets->prio_tc); + if (err) + return err; + + err = mlx5_set_port_tc_group(mdev, tc_group); + if (err) + return err; + + return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); +} + +static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) +{ + int bw_sum = 0; + int i; + + /* Validate Priority */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) + return -EINVAL; + } + + /* Validate Bandwidth Sum */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + bw_sum += ets->tc_tx_bw[i]; + } + + if (bw_sum != 0 && bw_sum != 100) + return -EINVAL; + return 0; +} + +static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + err = mlx5e_dbcnl_validate_ets(ets); + if (err) + return err; + + err = mlx5e_dcbnl_ieee_setets_core(priv, ets); + if (err) + return err; + + memcpy(&priv->params.ets, ets, sizeof(*ets)); + priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; + + return 0; +} + +static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + pfc->pfc_cap = mlx5_max_tc(mdev) + 1; + + return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); +} + +static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + enum mlx5_port_status ps; + u8 curr_pfc_en; + int ret; + + mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL); + + if (pfc->pfc_en == curr_pfc_en) + return 0; + + mlx5_query_port_admin_status(mdev, &ps); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + + ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); + + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + + return ret; +} + +static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + return 0; +} + +static int 
mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; + int err; + int i; + + err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); + if (err) + return err; + + memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + switch (max_bw_unit[i]) { + case MLX5_100_MBPS_UNIT: + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB; + break; + case MLX5_GBPS_UNIT: + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB; + break; + case MLX5_BW_NO_LIMIT: + break; + default: + WARN(true, "non-supported BW unit"); + break; + } + } + + return 0; +} + +static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; + __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB); + int i; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + memset(max_bw_unit, 0, sizeof(max_bw_unit)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + if (!maxrate->tc_maxrate[i]) { + max_bw_unit[i] = MLX5_BW_NO_LIMIT; + continue; + } + if (maxrate->tc_maxrate[i] < upper_limit_mbps) { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_100MB); + max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1; + max_bw_unit[i] = MLX5_100_MBPS_UNIT; + } else { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_1GB); + max_bw_unit[i] = MLX5_GBPS_UNIT; + } + } + + return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); +} + +const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { + .ieee_getets = mlx5e_dcbnl_ieee_getets, + .ieee_setets = mlx5e_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc, + .getdcbx = mlx5e_dcbnl_getdcbx, + .setdcbx = mlx5e_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 65624ac65b4c..68834b715f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -211,13 +211,14 @@ static void mlx5e_get_strings(struct net_device *dev, sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, rq_stats_strings[j]); - for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + - (idx++) * ETH_GSTRING_LEN, - "tx%d_%d_%s", i, tc, - sq_stats_strings[j]); + (idx++) * ETH_GSTRING_LEN, + "tx%d_%s", + priv->channeltc_to_txq_map[i][tc], + sq_stats_strings[j]); break; } } @@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, &priv->state) ? 0 : ((u64 *)&priv->channel[i]->rq.stats)[j]; - for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = !test_bit(MLX5E_STATE_OPENED, &priv->state) ? 
0 : @@ -385,6 +386,8 @@ static int mlx5e_set_channels(struct net_device *dev, mlx5e_close_locked(dev); priv->params.num_channels = count; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); if (was_opened) err = mlx5e_open_locked(dev); @@ -399,6 +402,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) + return -ENOTSUPP; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; @@ -416,11 +422,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev, int tc; int i; + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + return -ENOTSUPP; + + mutex_lock(&priv->state_lock); priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + for (i = 0; i < priv->params.num_channels; ++i) { c = priv->channel[i]; @@ -436,6 +449,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev, coal->rx_max_coalesced_frames); } +out: + mutex_unlock(&priv->state_lock); return 0; } @@ -703,18 +718,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) +{ + struct mlx5_core_dev *mdev = priv->mdev; + void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + int i; + + MLX5_SET(modify_tir_in, in, bitmask.hash, 1); + mlx5e_build_tir_ctx_hash(tirc, priv); + + for (i = 0; i < MLX5E_NUM_TT; i++) + if (IS_HASHING_TT(i)) + mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen); +} + static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mlx5e_priv *priv = netdev_priv(dev); - bool close_open; - int err = 0; + int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + void *in; if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_XOR) && (hfunc != ETH_RSS_HASH_TOP)) return -EINVAL; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + mutex_lock(&priv->state_lock); if (indir) { @@ -723,11 +756,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); } - close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) && - test_bit(MLX5E_STATE_OPENED, &priv->state); - if (close_open) - mlx5e_close_locked(dev); - if (key) memcpy(priv->params.toeplitz_hash_key, key, sizeof(priv->params.toeplitz_hash_key)); @@ -735,12 +763,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, if (hfunc != ETH_RSS_HASH_NO_CHANGE) priv->params.rss_hfunc = hfunc; - if (close_open) - err = mlx5e_open_locked(priv->netdev); + mlx5e_modify_tirs_hash(priv, in, inlen); mutex_unlock(&priv->state_lock); - return err; + kvfree(in); + + return 0; } static int mlx5e_get_rxnfc(struct net_device *netdev, @@ -884,6 +913,129 @@ static int mlx5e_get_ts_info(struct net_device *dev, return 0; } +static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev) +{ + __u32 ret = 0; + + if (MLX5_CAP_GEN(mdev, wol_g)) + ret |= WAKE_MAGIC; + + if (MLX5_CAP_GEN(mdev, wol_s)) + ret |= WAKE_MAGICSECURE; + + if (MLX5_CAP_GEN(mdev, wol_a)) + ret |= WAKE_ARP; + + if (MLX5_CAP_GEN(mdev, wol_b)) + ret 
|= WAKE_BCAST; + + if (MLX5_CAP_GEN(mdev, wol_m)) + ret |= WAKE_MCAST; + + if (MLX5_CAP_GEN(mdev, wol_u)) + ret |= WAKE_UCAST; + + if (MLX5_CAP_GEN(mdev, wol_p)) + ret |= WAKE_PHY; + + return ret; +} + +static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) +{ + __u32 ret = 0; + + if (mode & MLX5_WOL_MAGIC) + ret |= WAKE_MAGIC; + + if (mode & MLX5_WOL_SECURED_MAGIC) + ret |= WAKE_MAGICSECURE; + + if (mode & MLX5_WOL_ARP) + ret |= WAKE_ARP; + + if (mode & MLX5_WOL_BROADCAST) + ret |= WAKE_BCAST; + + if (mode & MLX5_WOL_MULTICAST) + ret |= WAKE_MCAST; + + if (mode & MLX5_WOL_UNICAST) + ret |= WAKE_UCAST; + + if (mode & MLX5_WOL_PHY_ACTIVITY) + ret |= WAKE_PHY; + + return ret; +} + +static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode) +{ + u8 ret = 0; + + if (mode & WAKE_MAGIC) + ret |= MLX5_WOL_MAGIC; + + if (mode & WAKE_MAGICSECURE) + ret |= MLX5_WOL_SECURED_MAGIC; + + if (mode & WAKE_ARP) + ret |= MLX5_WOL_ARP; + + if (mode & WAKE_BCAST) + ret |= MLX5_WOL_BROADCAST; + + if (mode & WAKE_MCAST) + ret |= MLX5_WOL_MULTICAST; + + if (mode & WAKE_UCAST) + ret |= MLX5_WOL_UNICAST; + + if (mode & WAKE_PHY) + ret |= MLX5_WOL_PHY_ACTIVITY; + + return ret; +} + +static void mlx5e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 mlx5_wol_mode; + int err; + + memset(wol, 0, sizeof(*wol)); + + wol->supported = mlx5e_get_wol_supported(mdev); + if (!wol->supported) + return; + + err = mlx5_query_port_wol(mdev, &mlx5_wol_mode); + if (err) + return; + + wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode); +} + +static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + __u32 wol_supported = mlx5e_get_wol_supported(mdev); + u32 mlx5_wol_mode; + + if (!wol_supported) + return -ENOTSUPP; + + if (wol->wolopts & ~wol_supported) + return -EINVAL; + + mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts); + + return mlx5_set_port_wol(mdev, mlx5_wol_mode); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -908,4 +1060,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, .get_ts_info = mlx5e_get_ts_info, + .get_wol = mlx5e_get_wol, + .set_wol = mlx5e_set_wol, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 80d81abc4820..d00a24203410 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1041,7 +1041,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1150,7 +1150,7 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6a3e430f1062..e0adb604f461 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -30,9 +30,14 @@ * SOFTWARE. */ +#include <net/tc_act/tc_gact.h> +#include <net/pkt_cls.h> #include <linux/mlx5/fs.h> +#include <net/vxlan.h> #include "en.h" +#include "en_tc.h" #include "eswitch.h" +#include "vxlan.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -141,11 +146,18 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) return; /* Collect firts the SW counters and then HW for consistency */ + s->rx_packets = 0; + s->rx_bytes = 0; + s->tx_packets = 0; + s->tx_bytes = 0; s->tso_packets = 0; s->tso_bytes = 0; + s->tso_inner_packets = 0; + s->tso_inner_bytes = 0; s->tx_queue_stopped = 0; s->tx_queue_wake = 0; s->tx_queue_dropped = 0; + s->tx_csum_inner = 0; tx_offload_none = 0; s->lro_packets = 0; s->lro_bytes = 0; @@ -155,6 +167,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; s->lro_packets += rq_stats->lro_packets; s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; @@ -164,11 +178,16 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; s->tso_packets += sq_stats->tso_packets; s->tso_bytes += sq_stats->tso_bytes; + s->tso_inner_packets += sq_stats->tso_inner_packets; + s->tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_csum_inner += sq_stats->csum_offload_inner; tx_offload_none += sq_stats->csum_offload_none; } } @@ -225,25 +244,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tx_broadcast_bytes = MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); - s->rx_packets = - s->rx_unicast_packets + - s->rx_multicast_packets + - s->rx_broadcast_packets; - s->rx_bytes = - s->rx_unicast_bytes + - s->rx_multicast_bytes + - s->rx_broadcast_bytes; - s->tx_packets = - s->tx_unicast_packets + - s->tx_multicast_packets + - s->tx_broadcast_packets; - s->tx_bytes = - s->tx_unicast_bytes + - s->tx_multicast_bytes + - s->tx_broadcast_bytes; - /* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none; + s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; s->rx_csum_good = s->rx_packets - s->rx_csum_none - s->rx_csum_sw; @@ -267,9 +269,14 @@ static void mlx5e_update_stats_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void __mlx5e_async_event(struct mlx5e_priv *priv, - enum mlx5_dev_event event) +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, + enum mlx5_dev_event event, unsigned long param) { + struct mlx5e_priv *priv = vpriv; + + if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + return; + switch (event) { case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: @@ -281,17 +288,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv, } } -static void mlx5e_async_event(struct 
mlx5_core_dev *mdev, void *vpriv, - enum mlx5_dev_event event, unsigned long param) -{ - struct mlx5e_priv *priv = vpriv; - - spin_lock(&priv->async_events_spinlock); - if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) - __mlx5e_async_event(priv, event); - spin_unlock(&priv->async_events_spinlock); -} - static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); @@ -299,9 +295,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv) static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { - spin_lock_irq(&priv->async_events_spinlock); clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); - spin_unlock_irq(&priv->async_events_spinlock); + synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) @@ -547,7 +542,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, int txq_ix; int err; - err = mlx5_alloc_map_uar(mdev, &sq->uar); + err = mlx5_alloc_map_uar(mdev, &sq->uar, true); if (err) return err; @@ -559,8 +554,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - sq->uar_map = sq->uar.map; - sq->uar_bf_map = sq->uar.bf_map; + if (sq->uar.bf_map) { + set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); + sq->uar_map = sq->uar.bf_map; + } else { + sq->uar_map = sq->uar.map; + } sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; @@ -869,12 +868,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c, if (err) goto err_destroy_cq; - err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); - if (err) - goto err_destroy_cq; - + if (MLX5_CAP_GEN(mdev, cq_moderation)) + mlx5_core_modify_cq_moderation(mdev, &cq->mcq, + moderation_usecs, + moderation_frames); return 0; err_destroy_cq: @@ -982,7 +979,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mr.key); + c->mkey_be = cpu_to_be32(priv->mkey.key); c->num_tc = priv->params.num_tc; mlx5e_build_channeltc_to_txq_map(priv, ix); @@ -1063,6 +1060,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, param->wq.linear = 1; } +static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); +} + static void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_sq_param *param) { @@ -1199,7 +1205,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = priv->params.indirection_rqt[ix]; - ix = ix % priv->params.num_channels; MLX5_SET(rqtc, rqtc, rq_num[i], test_bit(MLX5E_STATE_OPENED, &priv->state) ? 
priv->channel[ix]->rq.rqn : @@ -1317,7 +1322,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) lro_timer_supported_periods[2])); } -static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) +{ + MLX5_SET(tirc, tirc, rx_hash_fn, + mlx5e_rx_hash_fn(priv->params.rss_hfunc)); + if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + void *rss_key = MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key); + size_t len = MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, priv->params.toeplitz_hash_key, len); + } +} + +static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1325,6 +1345,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) void *tirc; int inlen; int err; + int tt; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1336,7 +1357,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) mlx5e_build_tir_ctx_lro(tirc, priv); - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + if (err) + break; + } kvfree(in); @@ -1400,6 +1425,24 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev) return 0; } +static void mlx5e_netdev_set_tcs(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int nch = priv->params.num_channels; + int ntc = priv->params.num_tc; + int tc; + + netdev_reset_tc(netdev); + + if (ntc == 1) + return; + + netdev_set_num_tc(netdev, ntc); + + for (tc = 0; tc < ntc; tc++) + netdev_set_tc_queue(netdev, tc, nch, tc * nch); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1408,6 +1451,8 @@ int mlx5e_open_locked(struct net_device *netdev) set_bit(MLX5E_STATE_OPENED, &priv->state); + mlx5e_netdev_set_tcs(netdev); + num_txqs = priv->params.num_channels * priv->params.num_tc; netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); @@ -1430,8 +1475,8 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_close_channels; } - mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); + mlx5e_update_carrier(priv); mlx5e_timestamp_init(priv); schedule_delayed_work(&priv->update_stats_work, 0); @@ -1470,8 +1515,8 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_timestamp_cleanup(priv); - mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); + mlx5e_redirect_rqts(priv); mlx5e_close_channels(priv); return 0; @@ -1553,8 +1598,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv) memset(&cq_param, 0, sizeof(cq_param)); memset(&rq_param, 0, sizeof(rq_param)); - mlx5e_build_rx_cq_param(priv, &cq_param); - mlx5e_build_rq_param(priv, &rq_param); + mlx5e_build_drop_rq_param(&rq_param); err = mlx5e_create_drop_cq(priv, cq, &cq_param); if (err) @@ -1602,7 +1646,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) memset(in, 0, sizeof(in)); - MLX5_SET(tisc, tisc, prio, tc); + MLX5_SET(tisc, tisc, prio, tc << 1); MLX5_SET(tisc, tisc, transport_domain, priv->tdn); return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); @@ -1618,7 +1662,7 @@ static int mlx5e_create_tises(struct mlx5e_priv *priv) int err; int tc; - for (tc = 0; tc < priv->params.num_tc; tc++) { + for (tc = 0; tc < 
MLX5E_MAX_NUM_TC; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@ -1637,7 +1681,7 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv) { int tc; - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) mlx5e_destroy_tis(priv, tc); } @@ -1672,17 +1716,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) default: MLX5_SET(tirc, tirc, indirect_table, priv->rqtn[MLX5E_INDIRECTION_RQT]); - MLX5_SET(tirc, tirc, rx_hash_fn, - mlx5e_rx_hash_fn(priv->params.rss_hfunc)); - if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { - void *rss_key = MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key); - size_t len = MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key); - - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, priv->params.toeplitz_hash_key, len); - } + mlx5e_build_tir_ctx_hash(tirc, priv); break; } @@ -1824,6 +1858,58 @@ static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) mlx5e_destroy_tir(priv, i); } +static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool was_opened; + int err = 0; + + if (tc && tc != MLX5E_MAX_NUM_TC) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params.num_tc = tc ? tc : 1; + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + goto mqprio; + + switch (tc->type) { + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, tc->cls_flower); + } + default: + return -EOPNOTSUPP; + } + +mqprio: + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return mlx5e_setup_tc(dev, tc->tc); +} + static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -1885,8 +1971,10 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_close_locked(priv->netdev); priv->params.lro_en = !!(features & NETIF_F_LRO); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); + err = mlx5e_modify_tirs_lro(priv); + if (err) + mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", + err); if (was_opened) err = mlx5e_open_locked(priv->netdev); @@ -1901,6 +1989,13 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_disable_vlan_filter(priv); } + if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) && + mlx5e_tc_num_filters(priv)) { + netdev_err(netdev, + "Active offloaded tc filters, can't turn hw_tc_offload off\n"); + return -EINVAL; + } + return err; } @@ -2024,18 +2119,116 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } -static struct net_device_ops mlx5e_netdev_ops = { +static void mlx5e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!mlx5e_vxlan_allowed(priv->mdev)) + return; + + mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); +} + +static void mlx5e_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct 
mlx5e_priv *priv = netdev_priv(netdev); + + if (!mlx5e_vxlan_allowed(priv->mdev)) + return; + + mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); +} + +static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, + struct sk_buff *skb, + netdev_features_t features) +{ + struct udphdr *udph; + u16 proto; + u16 port = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + proto = ipv6_hdr(skb)->nexthdr; + break; + default: + goto out; + } + + if (proto == IPPROTO_UDP) { + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + } + + /* Verify if UDP port is being offloaded by HW */ + if (port && mlx5e_vxlan_lookup_port(priv, port)) + return features; + +out: + /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static netdev_features_t mlx5e_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + /* Validate if the tunneled packet is being offloaded by HW */ + if (skb->encapsulation && + (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK)) + return mlx5e_vxlan_features_check(priv, skb, features); + + return features; +} + +static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, + .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, .ndo_set_mac_address = mlx5e_set_mac, - .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, - .ndo_change_mtu = mlx5e_change_mtu, - .ndo_do_ioctl = mlx5e_ioctl, + .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, +}; + +static const struct net_device_ops mlx5e_netdev_ops_sriov = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_select_queue = mlx5e_select_queue, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_set_rx_mode = mlx5e_set_rx_mode, + .ndo_set_mac_address = mlx5e_set_mac, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_set_features = mlx5e_set_features, + .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, + .ndo_add_vxlan_port = mlx5e_add_vxlan_port, + .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, + .ndo_set_vf_mac = mlx5e_set_vf_mac, + .ndo_set_vf_vlan = mlx5e_set_vf_vlan, + .ndo_get_vf_config = mlx5e_get_vf_config, + .ndo_set_vf_link_state = mlx5e_set_vf_link_state, + .ndo_get_vf_stats = mlx5e_get_vf_stats, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -2057,6 +2250,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) } if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + mlx5_core_warn(mdev, "CQ moderation is not supported\n"); return 0; } @@ -2070,12 +2265,38 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2
/*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } +#ifdef CONFIG_MLX5_CORE_EN_DCB +static void mlx5e_ets_init(struct mlx5e_priv *priv) +{ + int i; + + priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < priv->params.ets.ets_cap; i++) { + priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; + priv->params.ets.prio_tc[i] = i; + } + + /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ + priv->params.ets.prio_tc[0] = 1; + priv->params.ets.prio_tc[1] = 0; +} +#endif + +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); - int i; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2093,14 +2314,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.num_tc = 1; - priv->params.default_vlan_prio = 0; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; netdev_rss_key_fill(priv->params.toeplitz_hash_key, sizeof(priv->params.toeplitz_hash_key)); - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - priv->params.indirection_rqt[i] = i % num_channels; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, num_channels); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; @@ -2108,9 +2328,11 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_channels; - priv->default_vlan_prio = priv->params.default_vlan_prio; - spin_lock_init(&priv->async_events_spinlock); +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_ets_init(priv); +#endif + mutex_init(&priv->state_lock); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); @@ -2137,18 +2359,15 @@ static void mlx5e_build_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (priv->params.num_tc > 1) - mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac; - mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan; - mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config; - mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state; - mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats; + netdev->netdev_ops = &mlx5e_netdev_ops_sriov; +#ifdef CONFIG_MLX5_CORE_EN_DCB + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; +#endif + } else { + netdev->netdev_ops = &mlx5e_netdev_ops_basic; } - netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; netdev->ethtool_ops = &mlx5e_ethtool_ops; @@ -2170,10 +2389,27 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (mlx5e_vxlan_allowed(mdev)) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_enc_features |= NETIF_F_IP_CSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_RXHASH; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + } + netdev->features = netdev->hw_features; if (!priv->params.lro_en) netdev->features &= ~NETIF_F_LRO; +#define FT_CAP(f) 
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) + if (FT_CAP(flow_modify_en) && + FT_CAP(modify_root) && + FT_CAP(identified_miss_table_mode) && + FT_CAP(flow_table_modify)) + priv->netdev->hw_features |= NETIF_F_HW_TC; + netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2182,7 +2418,7 @@ static void mlx5e_build_netdev(struct net_device *netdev) } static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, - struct mlx5_core_mr *mr) + struct mlx5_core_mkey *mkey) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_create_mkey_mbox_in *in; @@ -2198,7 +2434,7 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL, + err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, NULL); kvfree(in); @@ -2216,7 +2452,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) if (mlx5e_check_required_hca_cap(mdev)) return NULL; - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * MLX5E_MAX_NUM_TC, + nch); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); return NULL; @@ -2229,7 +2467,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) priv = netdev_priv(netdev); - err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); if (err) { mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); goto err_free_netdev; @@ -2247,7 +2485,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_dealloc_pd; } - err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); + err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey); if (err) { mlx5_core_err(mdev, "create mkey failed, %d\n", err); goto err_dealloc_transport_domain; @@ -2291,17 +2529,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) mlx5e_init_eth_addr(priv); + mlx5e_vxlan_init(priv); + + err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_tables; + +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); +#endif + err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_destroy_flow_tables; + goto err_tc_cleanup; } + if (mlx5e_vxlan_allowed(mdev)) + vxlan_get_rx_port(netdev); + mlx5e_enable_async_events(priv); schedule_work(&priv->set_rx_mode_work); return priv; +err_tc_cleanup: + mlx5e_tc_cleanup(priv); + err_destroy_flow_tables: mlx5e_destroy_flow_tables(priv); @@ -2321,7 +2575,7 @@ err_destroy_tises: mlx5e_destroy_tises(priv); err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &priv->mr); + mlx5_core_destroy_mkey(mdev, &priv->mkey); err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, priv->tdn); @@ -2349,13 +2603,15 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_disable_async_events(priv); flush_scheduled_work(); unregister_netdev(netdev); + mlx5e_tc_cleanup(priv); + mlx5e_vxlan_cleanup(priv); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); mlx5e_close_drop_rq(priv); mlx5e_destroy_tises(priv); - mlx5_core_destroy_mkey(priv->mdev, &priv->mr); + mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 
mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index dd959d929aad..58d4e2f962c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -35,6 +35,7 @@ #include <linux/tcp.h> #include <net/busy_poll.h> #include "en.h" +#include "en_tc.h" static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) { @@ -167,14 +168,15 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb) static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, - struct sk_buff *skb) + struct sk_buff *skb, + bool lro) { if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; - if (likely(cqe->hds_ip_ext & CQE_L4_OK)) { + if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if (is_first_ethertype_ip(skb)) { + } else if (likely(is_first_ethertype_ip(skb))) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); rq->stats.csum_sw++; @@ -211,7 +213,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (unlikely(mlx5e_rx_hw_stamp(tstamp))) mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); - mlx5e_handle_csum(netdev, cqe, rq, skb); + mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); skb->protocol = eth_type_trans(skb, netdev); @@ -223,6 +225,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (cqe_has_vlan(cqe)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); + + skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) @@ -230,10 +234,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return 0; - for (work_done = 0; work_done < budget; work_done++) { struct mlx5e_rx_wqe *wqe; struct mlx5_cqe64 *cqe; @@ -267,6 +267,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) mlx5e_build_rx_skb(cqe, rq, skb); rq->stats.packets++; + rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); napi_gro_receive(cq->napi, skb); wq_ll_pop: @@ -279,8 +280,5 @@ wq_ll_pop: /* ensure cq space is freed before enabling more cqes */ wmb(); - if (work_done == budget) - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c new file mode 100644 index 000000000000..b3de09f13425 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_skbedit.h> +#include <linux/mlx5/fs.h> +#include <linux/mlx5/device.h> +#include <linux/rhashtable.h> +#include "en.h" +#include "en_tc.h" + +struct mlx5e_tc_flow { + struct rhash_head node; + u64 cookie; + struct mlx5_flow_rule *rule; +}; + +#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024 +#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4 + +static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, + u32 *match_c, u32 *match_v, + u32 action, u32 flow_tag) +{ + struct mlx5_flow_destination dest = { + .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, + {.ft = priv->fts.vlan.t}, + }; + struct mlx5_flow_rule *rule; + bool table_created = false; + + if (IS_ERR_OR_NULL(priv->fts.tc.t)) { + priv->fts.tc.t = + mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, + MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, + MLX5E_TC_FLOW_TABLE_NUM_GROUPS); + if (IS_ERR(priv->fts.tc.t)) { + netdev_err(priv->netdev, + "Failed to create tc offload table\n"); + return ERR_CAST(priv->fts.tc.t); + } + + table_created = true; + } + + rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS, + match_c, match_v, + action, flow_tag, + action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? 
&dest : NULL); + + if (IS_ERR(rule) && table_created) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } + + return rule; +} + +static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5_flow_rule *rule) +{ + mlx5_del_flow_rule(rule); + + if (!mlx5e_tc_num_filters(priv)) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } +} + +static int parse_cls_flower(struct mlx5e_priv *priv, + u32 *match_c, u32 *match_v, + struct tc_cls_flower_offload *f) +{ + void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); + u16 addr_type = 0; + u8 ip_proto = 0; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + addr_type = key->addr_type; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, + mask->ip_proto); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + key->ip_proto); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dmac_47_16), + mask->dst); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dmac_47_16), + key->dst); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + smac_47_16), + mask->src); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + smac_47_16), + key->src); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &mask->src, sizeof(mask->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &key->src, sizeof(key->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &mask->dst, sizeof(mask->dst)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &key->dst, sizeof(key->dst)); + } + + if (addr_type == 
FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, sizeof(mask->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, sizeof(key->src)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, sizeof(mask->dst)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, sizeof(key->dst)); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + switch (ip_proto) { + case IPPROTO_TCP: + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + tcp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + tcp_sport, ntohs(key->src)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + tcp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + tcp_dport, ntohs(key->dst)); + break; + + case IPPROTO_UDP: + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_sport, ntohs(key->src)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_dport, ntohs(key->dst)); + break; + default: + netdev_err(priv->netdev, + "Only UDP and TCP transport are supported\n"); + return -EINVAL; + } + } + + return 0; +} + +static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + u32 *action, u32 *flow_tag) +{ + const struct tc_action *a; + + if (tc_no_actions(exts)) + return -EINVAL; + + *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + *action = 0; + + tc_for_each_action(a, exts) { + /* Only support a single action per rule */ + if (*action) + return -EINVAL; + + if (is_tcf_gact_shot(a)) { + *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + continue; + } + + if (is_tcf_skbedit_mark(a)) { + u32 mark = tcf_skbedit_mark(a); + + if (mark & ~MLX5E_TC_FLOW_ID_MASK) { + netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", + mark); + return -EINVAL; + } + + *flow_tag = mark; + *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + continue; + } + + return -EINVAL; + } + + return 0; +} + +int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + u32 *match_c; + u32 *match_v; + int err = 0; + u32 flow_tag; + u32 action; + struct mlx5e_tc_flow *flow; + struct mlx5_flow_rule *old = NULL; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (flow) + old = flow->rule; + else + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_c || !match_v || !flow) { + err = -ENOMEM; + goto err_free; + } + + flow->cookie = f->cookie; + + err = parse_cls_flower(priv, match_c, match_v, f); + if (err < 0) + goto err_free; + 
+ err = parse_tc_actions(priv, f->exts, &action, &flow_tag); + if (err < 0) + goto err_free; + + err = rhashtable_insert_fast(&tc->ht, &flow->node, + tc->ht_params); + if (err) + goto err_free; + + flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action, + flow_tag); + if (IS_ERR(flow->rule)) { + err = PTR_ERR(flow->rule); + goto err_hash_del; + } + + if (old) + mlx5e_tc_del_flow(priv, old); + + goto out; + +err_hash_del: + rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + +err_free: + if (!old) + kfree(flow); +out: + kfree(match_c); + kfree(match_v); + return err; +} + +int mlx5e_delete_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_flow *flow; + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (!flow) + return -EINVAL; + + rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + + mlx5e_tc_del_flow(priv, flow->rule); + + kfree(flow); + + return 0; +} + +static const struct rhashtable_params mlx5e_tc_flow_ht_params = { + .head_offset = offsetof(struct mlx5e_tc_flow, node), + .key_offset = offsetof(struct mlx5e_tc_flow, cookie), + .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +int mlx5e_tc_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + tc->ht_params = mlx5e_tc_flow_ht_params; + return rhashtable_init(&tc->ht, &tc->ht_params); +} + +static void _mlx5e_tc_del_flow(void *ptr, void *arg) +{ + struct mlx5e_tc_flow *flow = ptr; + struct mlx5e_priv *priv = arg; + + mlx5e_tc_del_flow(priv, flow->rule); + kfree(flow); +} + +void mlx5e_tc_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); + + if (!IS_ERR_OR_NULL(priv->fts.tc.t)) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h new file mode 100644 index 000000000000..d677428dc10f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_EN_TC_H__ +#define __MLX5_EN_TC_H__ + +#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff + +int mlx5e_tc_init(struct mlx5e_priv *priv); +void mlx5e_tc_cleanup(struct mlx5e_priv *priv); + +int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, + struct tc_cls_flower_offload *f); +int mlx5e_delete_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f); + +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +{ + return atomic_read(&priv->fts.tc.ht.nelems); +} + +#endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2c3fba0fff54..1ffc7cb6f78c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -109,12 +109,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, { struct mlx5e_priv *priv = netdev_priv(dev); int channel_ix = fallback(dev, skb); - int up = skb_vlan_tag_present(skb) ? - skb->vlan_tci >> VLAN_PRIO_SHIFT : - priv->default_vlan_prio; - int tc = netdev_get_prio_tc_map(dev, up); + int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? + skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; - return priv->channeltc_to_txq_map[channel_ix][tc]; + return priv->channeltc_to_txq_map[channel_ix][up]; } static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, @@ -179,6 +177,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) unsigned int skb_len = skb->len; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; + unsigned int num_bytes; bool bf = false; u16 headlen; u16 ds_cnt; @@ -187,9 +186,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) memset(wqe, 0, sizeof(*wqe)); - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) - eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; - else + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; + if (skb->encapsulation) { + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | + MLX5_ETH_WQE_L4_INNER_CSUM; + sq->stats.csum_offload_inner++; + } else { + eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + } + } else sq->stats.csum_offload_none++; if (sq->cc != sq->prev_cc) { @@ -198,24 +204,30 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) } if (skb_is_gso(skb)) { - u32 payload_len; - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); opcode = MLX5_OPCODE_LSO; - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - payload_len = skb->len - ihs; - wi->num_bytes = skb->len + - (skb_shinfo(skb)->gso_segs - 1) * ihs; - sq->stats.tso_packets++; - sq->stats.tso_bytes += payload_len; + + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + sq->stats.tso_inner_packets++; + sq->stats.tso_inner_bytes += skb->len - ihs; + } else { + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + sq->stats.tso_packets++; + 
sq->stats.tso_bytes += skb->len - ihs; + } + + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; } else { bf = sq->bf_budget && !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); - wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } + wi->num_bytes = num_bytes; + if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, &skb_len); @@ -293,7 +305,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { int bf_sz = 0; - if (bf && sq->uar_bf_map) + if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state)) bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; @@ -307,6 +319,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->bf_budget = bf ? sq->bf_budget - 1 : 0; sq->stats.packets++; + sq->stats.bytes += num_bytes; return NETDEV_TX_OK; dma_unmap_wqe_err: @@ -326,7 +339,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb); } -bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq *sq; u32 dma_fifo_cc; @@ -335,10 +348,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) u16 sqcc; int i; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return false; - sq = container_of(cq, struct mlx5e_sq, cq); npkts = 0; @@ -402,7 +411,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) npkts++; nbytes += wi->num_bytes; sqcc += wi->num_wqebbs; - dev_kfree_skb(skb); + napi_consume_skb(skb, napi_budget); } while (!last_wqe); } @@ -422,10 +431,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) netif_tx_wake_queue(sq->txq); sq->stats.wake++; } - if (i == MLX5E_TX_CQ_POLL_BUDGET) { - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return true; - } - return false; + return (i == MLX5E_TX_CQ_POLL_BUDGET); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 4ac8d716dbdd..9bb4395aceeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -60,7 +60,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); for (i = 0; i < c->num_tc; i++) - busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); + busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= work_done == budget; @@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); barrier(); napi_schedule(cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 647a3ca2c2a9..18fccec72c5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); +u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx) +{ + return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector; +} + int mlx5_eq_init(struct mlx5_core_dev *dev) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a9894d2e8e26..f46f1db0fc00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -218,19 +218,22 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); - in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - list_for_each_entry(dst, &fte->node.children, node.list) { - unsigned int id; - - MLX5_SET(dest_format_struct, in_dests, destination_type, - dst->dest_attr.type); - if (dst->dest_attr.type == - MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) - id = dst->dest_attr.ft->id; - else - id = dst->dest_attr.tir_num; - MLX5_SET(dest_format_struct, in_dests, destination_id, id); - in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + list_for_each_entry(dst, &fte->node.children, node.list) { + unsigned int id; + + MLX5_SET(dest_format_struct, in_dests, destination_type, + dst->dest_attr.type); + if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { + id = dst->dest_attr.ft->id; + } else { + id = dst->dest_attr.tir_num; + } + MLX5_SET(dest_format_struct, in_dests, destination_id, id); + in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + } } memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, inlen, out, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6f68dba8d7ed..5121be4675d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -73,10 +73,13 @@ #define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_MAX_FT) -#define KERNEL_MAX_FT 2 -#define KERNEL_NUM_PRIOS 1 +#define KERNEL_MAX_FT 3 +#define KERNEL_NUM_PRIOS 2 #define KENREL_MIN_LEVEL 2 +#define ANCHOR_MAX_FT 1 +#define ANCHOR_NUM_PRIOS 1 +#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) struct node_caps { size_t arr_sz; long *caps; @@ -92,7 +95,7 @@ static struct init_tree_node { int max_ft; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 3, + .ar_size = 4, .children = (struct init_tree_node[]) { ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), @@ -108,6 +111,8 @@ static struct init_tree_node { FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), + ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))), } }; @@ -196,8 +201,10 @@ static void tree_put_node(struct fs_node *node) static int tree_remove_node(struct fs_node *node) { - if (atomic_read(&node->refcount) > 1) - return -EPERM; + if (atomic_read(&node->refcount) > 1) { + atomic_dec(&node->refcount); + return -EEXIST; + } tree_put_node(node); return 0; } @@ -360,8 +367,13 @@ static void del_rule(struct fs_node *node) memcpy(match_value, fte->val, sizeof(fte->val)); fs_get_obj(ft, fg->node.parent); list_del(&rule->node.list); - fte->dests_size--; - if (fte->dests_size) { + if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { + mutex_lock(&rule->dest_attr.ft->lock); + list_del(&rule->next_ft); + mutex_unlock(&rule->dest_attr.ft->lock); + } + if ((fte->action & 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && + --fte->dests_size) { err = mlx5_cmd_update_fte(dev, ft, fg->id, fte); if (err) @@ -465,6 +477,8 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, ft->node.type = FS_TYPE_FLOW_TABLE; ft->type = table_type; ft->max_fte = max_fte; + INIT_LIST_HEAD(&ft->fwd_rules); + mutex_init(&ft->lock); return ft; } @@ -601,9 +615,63 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } +static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct fs_fte *fte; + int err = 0; + + fs_get_obj(fte, rule->node.parent); + if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return -EINVAL; + lock_ref_node(&fte->node); + fs_get_obj(fg, fte->node.parent); + fs_get_obj(ft, fg->node.parent); + + memcpy(&rule->dest_attr, dest, sizeof(*dest)); + err = mlx5_cmd_update_fte(get_dev(&ft->node), + ft, fg->id, fte); + unlock_ref_node(&fte->node); + + return err; +} + +/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */ +static int connect_fwd_rules(struct mlx5_core_dev *dev, + struct mlx5_flow_table *new_next_ft, + struct mlx5_flow_table *old_next_ft) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *iter; + int err = 0; + + /* new_next_ft and old_next_ft could be NULL only + * when we create/destroy the anchor flow table. + */ + if (!new_next_ft || !old_next_ft) + return 0; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = new_next_ft; + + mutex_lock(&old_next_ft->lock); + list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules); + mutex_unlock(&old_next_ft->lock); + list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) { + err = mlx5_modify_rule_destination(iter, &dest); + if (err) + pr_err("mlx5_core: failed to modify rule to point on flow table %d\n", + new_next_ft->id); + } + return 0; +} + static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio) { + struct mlx5_flow_table *next_ft; int err = 0; /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ @@ -612,6 +680,11 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table err = connect_prev_fts(dev, ft, prio); if (err) return err; + + next_ft = find_next_chained_ft(prio); + err = connect_fwd_rules(dev, ft, next_ft); + if (err) + return err; } if (MLX5_CAP_FLOWTABLE(dev, @@ -762,8 +835,10 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) if (!rule) return NULL; + INIT_LIST_HEAD(&rule->next_ft); rule->node.type = FS_TYPE_FLOW_DEST; - memcpy(&rule->dest_attr, dest, sizeof(*dest)); + if (dest) + memcpy(&rule->dest_attr, dest, sizeof(*dest)); return rule; } @@ -782,11 +857,17 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, return ERR_PTR(-ENOMEM); fs_get_obj(ft, fg->node.parent); - /* Add dest to dests list- added as first element after the head */ + /* Add dest to dests list- we need flow tables to be in the + * end of the list for forward to next prio rules. 
+ */ tree_init_node(&rule->node, 1, del_rule); - list_add_tail(&rule->node.list, &fte->node.children); - fte->dests_size++; - if (fte->dests_size == 1) + if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + list_add(&rule->node.list, &fte->node.children); + else + list_add_tail(&rule->node.list, &fte->node.children); + if (dest) + fte->dests_size++; + if (fte->dests_size == 1 || !dest) err = mlx5_cmd_create_fte(get_dev(&ft->node), ft, fg->id, fte); else @@ -802,7 +883,8 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, free_rule: list_del(&rule->node.list); kfree(rule); - fte->dests_size--; + if (dest) + fte->dests_size--; return ERR_PTR(err); } @@ -903,6 +985,25 @@ out: return fg; } +static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + + list_for_each_entry(rule, &fte->node.children, node.list) { + if (rule->dest_attr.type == dest->type) { + if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + dest->vport_num == rule->dest_attr.vport_num) || + (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && + dest->ft == rule->dest_attr.ft) || + (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR && + dest->tir_num == rule->dest_attr.tir_num)) + return rule; + } + } + return NULL; +} + static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, u32 *match_value, u8 action, @@ -919,6 +1020,13 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); if (compare_match_value(&fg->mask, match_value, &fte->val) && action == fte->action && flow_tag == fte->flow_tag) { + rule = find_flow_rule(fte, dest); + if (rule) { + atomic_inc(&rule->node.refcount); + unlock_ref_node(&fte->node); + unlock_ref_node(&fg->node); + return rule; + } rule = add_rule_fte(fte, fg, dest); unlock_ref_node(&fte->node); if (IS_ERR(rule)) @@ -984,18 +1092,21 @@ static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, return rule; } -struct mlx5_flow_rule * -mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, - u32 action, - u32 flow_tag, - struct mlx5_flow_destination *dest) +static struct mlx5_flow_rule * +_mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) { struct mlx5_flow_group *g; struct mlx5_flow_rule *rule; + if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest) + return ERR_PTR(-EINVAL); + nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) if (compare_match_criteria(g->mask.match_criteria_enable, @@ -1014,6 +1125,63 @@ unlock: unlock_ref_node(&ft->node); return rule; } + +static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) +{ + return ((ft->type == FS_FT_NIC_RX) && + (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs))); +} + +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct mlx5_flow_destination gen_dest; + struct mlx5_flow_table *next_ft = NULL; + struct mlx5_flow_rule *rule = NULL; + u32 sw_action = action; + struct fs_prio *prio; + + fs_get_obj(prio, ft->node.parent); + if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { + 
if (!fwd_next_prio_supported(ft)) + return ERR_PTR(-EOPNOTSUPP); + if (dest) + return ERR_PTR(-EINVAL); + mutex_lock(&root->chain_lock); + next_ft = find_next_chained_ft(prio); + if (next_ft) { + gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + gen_dest.ft = next_ft; + dest = &gen_dest; + action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + } else { + mutex_unlock(&root->chain_lock); + return ERR_PTR(-EOPNOTSUPP); + } + } + + rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, + match_value, action, flow_tag, dest); + + if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { + if (!IS_ERR_OR_NULL(rule) && + (list_empty(&rule->next_ft))) { + mutex_lock(&next_ft->lock); + list_add(&rule->next_ft, &next_ft->fwd_rules); + mutex_unlock(&next_ft->lock); + rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + } + mutex_unlock(&root->chain_lock); + } + return rule; +} EXPORT_SYMBOL(mlx5_add_flow_rule); void mlx5_del_flow_rule(struct mlx5_flow_rule *rule) @@ -1077,6 +1245,10 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft) return 0; next_ft = find_next_chained_ft(prio); + err = connect_fwd_rules(dev, next_ft, ft); + if (err) + return err; + err = connect_prev_fts(dev, next_ft, prio); if (err) mlx5_core_warn(dev, "Failed to disconnect flow table %d\n", @@ -1126,6 +1298,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, case MLX5_FLOW_NAMESPACE_BYPASS: case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_LEFTOVERS: + case MLX5_FLOW_NAMESPACE_ANCHOR: prio = type; break; case MLX5_FLOW_NAMESPACE_FDB: @@ -1351,6 +1524,25 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) } } +#define ANCHOR_PRIO 0 +#define ANCHOR_SIZE 1 +static int create_anchor_flow_table(struct mlx5_core_dev + *dev) +{ + struct mlx5_flow_namespace *ns = NULL; + struct mlx5_flow_table *ft; + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); + if (!ns) + return -EINVAL; + ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE); + if (IS_ERR(ft)) { + mlx5_core_err(dev, "Failed to create last anchor flow table"); + return PTR_ERR(ft); + } + return 0; +} + static int init_root_ns(struct mlx5_core_dev *dev) { @@ -1363,6 +1555,9 @@ static int init_root_ns(struct mlx5_core_dev *dev) set_prio_attrs(dev->priv.root_ns); + if (create_anchor_flow_table(dev)) + goto cleanup; + return 0; cleanup: @@ -1392,6 +1587,15 @@ static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, root_ns = NULL; } +static void destroy_flow_tables(struct fs_prio *prio) +{ + struct mlx5_flow_table *iter; + struct mlx5_flow_table *tmp; + + fs_for_each_ft_safe(iter, tmp, prio) + mlx5_destroy_flow_table(iter); +} + static void cleanup_root_ns(struct mlx5_core_dev *dev) { struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; @@ -1420,6 +1624,7 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev) list); fs_get_obj(obj_iter_prio2, iter_prio2); + destroy_flow_tables(obj_iter_prio2); if (tree_remove_node(iter_prio2)) { mlx5_core_warn(dev, "Priority %d wasn't destroyed, refcount > 1\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 00245fd7e4bc..f37a6248a27b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -68,6 +68,11 @@ struct fs_node { struct mlx5_flow_rule { struct fs_node node; struct mlx5_flow_destination dest_attr; + /* next_ft should be accessed under chain_lock and only of + * destination 
type is FWD_NEXT_fT. + */ + struct list_head next_ft; + u32 sw_action; }; /* Type of children is mlx5_flow_group */ @@ -82,6 +87,10 @@ struct mlx5_flow_table { unsigned int required_groups; unsigned int num_groups; } autogroup; + /* Protect fwd_rules */ + struct mutex lock; + /* FWD rules that point on this flow table */ + struct list_head fwd_rules; }; /* Type of children is mlx5_flow_rule */ @@ -142,6 +151,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); #define fs_list_for_each_entry(pos, root) \ list_for_each_entry(pos, root, node.list) +#define fs_list_for_each_entry_safe(pos, tmp, root) \ + list_for_each_entry_safe(pos, tmp, root, node.list) + #define fs_for_each_ns_or_ft_reverse(pos, prio) \ list_for_each_entry_reverse(pos, &(prio)->node.children, list) @@ -157,6 +169,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); #define fs_for_each_ft(pos, prio) \ fs_list_for_each_entry(pos, &(prio)->node.children) +#define fs_for_each_ft_safe(pos, tmp, prio) \ + fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children) + #define fs_for_each_fg(pos, ft) \ fs_list_for_each_entry(pos, &(ft)->node.children) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa1ab4702385..75c7ae6a5cc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -98,88 +98,55 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); if (err) return err; if (MLX5_CAP_GEN(dev, eth_net_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS); if (err) return err; } if (MLX5_CAP_GEN(dev, pg)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP); if (err) return err; } if (MLX5_CAP_GEN(dev, atomic)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); if (err) return err; } if (MLX5_CAP_GEN(dev, roce)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE); if (err) return err; } if (MLX5_CAP_GEN(dev, nic_flow_table)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE); if (err) return err; } if (MLX5_CAP_GEN(dev, vport_group_manager) && MLX5_CAP_GEN(dev, eswitch_flow_table)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, - HCA_CAP_OPMOD_GET_CUR); - if (err) - return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, - HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } if (MLX5_CAP_GEN(dev, 
eswitch_flow_table)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, - HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, - HCA_CAP_OPMOD_GET_MAX); + } + + if (MLX5_CAP_GEN(dev, vector_calc)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1545a944c309..3f3b2fae4991 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -341,8 +341,9 @@ static u16 to_fw_pkey_sz(u32 size) } } -int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, - enum mlx5_cap_mode cap_mode) +static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, + enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); @@ -392,6 +393,16 @@ query_ex: return err; } +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) +{ + int ret; + + ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); + if (ret) + return ret; + return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); +} + static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) { u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; @@ -419,8 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) int err; if (MLX5_CAP_GEN(dev, atomic)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, - HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); if (err) return err; } else { @@ -462,11 +472,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) if (!set_ctx) goto query_ex; - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); - if (err) - goto query_ex; - - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); if (err) goto query_ex; @@ -767,22 +773,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -ENOTSUPP; } -static int map_bf_area(struct mlx5_core_dev *dev) -{ - resource_size_t bf_start = pci_resource_start(dev->pdev, 0); - resource_size_t bf_len = pci_resource_len(dev->pdev, 0); - - dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); - - return dev->priv.bf_mapping ? 
0 : -ENOMEM; -} - -static void unmap_bf_area(struct mlx5_core_dev *dev) -{ - if (dev->priv.bf_mapping) - io_mapping_free(dev->priv.bf_mapping); -} - static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; @@ -1103,21 +1093,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_stop_eqs; } - if (map_bf_area(dev)) - dev_err(&pdev->dev, "Failed to map blue flame area\n"); - err = mlx5_irq_set_affinity_hints(dev); - if (err) { + if (err) dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); - goto err_unmap_bf_area; - } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); mlx5_init_cq_table(dev); mlx5_init_qp_table(dev); mlx5_init_srq_table(dev); - mlx5_init_mr_table(dev); + mlx5_init_mkey_table(dev); err = mlx5_init_fs(dev); if (err) { @@ -1164,15 +1149,11 @@ err_sriov: err_reg_dev: mlx5_cleanup_fs(dev); err_fs: - mlx5_cleanup_mr_table(dev); + mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); - -err_unmap_bf_area: - unmap_bf_area(dev); - free_comp_eqs(dev); err_stop_eqs: @@ -1237,12 +1218,11 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) #endif mlx5_cleanup_fs(dev); - mlx5_cleanup_mr_table(dev); + mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); - unmap_bf_area(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0336847ec9a1..0b0b226c789e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -99,6 +99,7 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); +u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 6fa22b51e460..77a7293921d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -36,25 +36,26 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -void mlx5_init_mr_table(struct mlx5_core_dev *dev) +void mlx5_init_mkey_table(struct mlx5_core_dev *dev) { - struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_mkey_table *table = &dev->priv.mkey_table; memset(table, 0, sizeof(*table)); rwlock_init(&table->lock); INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); } -void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev) +void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) { } -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, struct mlx5_create_mkey_mbox_in *in, int inlen, mlx5_cmd_cbk_t callback, void *context, struct mlx5_create_mkey_mbox_out *out) { - struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_mkey_table *table = &dev->priv.mkey_table; struct mlx5_create_mkey_mbox_out lout; int err; u8 key; @@ -83,34 +84,35 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct 
mlx5_core_mr *mr, return mlx5_cmd_status_to_err(&lout.hdr); } - mr->iova = be64_to_cpu(in->seg.start_addr); - mr->size = be64_to_cpu(in->seg.len); - mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; - mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; + mkey->iova = be64_to_cpu(in->seg.start_addr); + mkey->size = be64_to_cpu(in->seg.len); + mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", - be32_to_cpu(lout.mkey), key, mr->key); + be32_to_cpu(lout.mkey), key, mkey->key); - /* connect to MR tree */ + /* connect to mkey tree */ write_lock_irq(&table->lock); - err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); + err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey); write_unlock_irq(&table->lock); if (err) { - mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n", - mlx5_base_mkey(mr->key), err); - mlx5_core_destroy_mkey(dev, mr); + mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n", + mlx5_base_mkey(mkey->key), err); + mlx5_core_destroy_mkey(dev, mkey); } return err; } EXPORT_SYMBOL(mlx5_core_create_mkey); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey) { - struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_mkey_table *table = &dev->priv.mkey_table; struct mlx5_destroy_mkey_mbox_in in; struct mlx5_destroy_mkey_mbox_out out; - struct mlx5_core_mr *deleted_mr; + struct mlx5_core_mkey *deleted_mkey; unsigned long flags; int err; @@ -118,16 +120,16 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) memset(&out, 0, sizeof(out)); write_lock_irqsave(&table->lock, flags); - deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key)); write_unlock_irqrestore(&table->lock, flags); - if (!deleted_mr) { - mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", - mlx5_base_mkey(mr->key)); + if (!deleted_mkey) { + mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n", + mlx5_base_mkey(mkey->key)); return -ENOENT; } in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; @@ -139,7 +141,7 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) } EXPORT_SYMBOL(mlx5_core_destroy_mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, struct mlx5_query_mkey_mbox_out *out, int outlen) { struct mlx5_query_mkey_mbox_in in; @@ -149,7 +151,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, memset(out, 0, outlen); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) return err; @@ -161,7 +163,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, } EXPORT_SYMBOL(mlx5_core_query_mkey); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +int mlx5_core_dump_fill_mkey(struct 
mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, u32 *mkey) { struct mlx5_query_special_ctxs_mbox_in in; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index a87e773e93f3..ae378c575deb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/port.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -324,6 +325,29 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); +int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, + u8 port_num, void *out, size_t sz) +{ + u32 *in; + int err; + + in = mlx5_vzalloc(sz); + if (!in) { + err = -ENOMEM; + return err; + } + + MLX5_SET(ppcnt_reg, in, local_port, port_num); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP); + err = mlx5_core_access_reg(dev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); + int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; @@ -363,3 +387,223 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx); + MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx); + MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx); + MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); + +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 0); + if (err) + return err; + + if (pfc_en_tx) + *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx); + + if (pfc_en_rx) + *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); + +int mlx5_max_tc(struct mlx5_core_dev *mdev) +{ + u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? 
: 8; + + return num_tc - 1; +} + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) +{ + u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + int err; + int i; + + memset(in, 0, sizeof(in)); + for (i = 0; i < 8; i++) { + if (prio_tc[i] > mlx5_max_tc(mdev)) + return -EINVAL; + + MLX5_SET(qtct_reg, in, prio, i); + MLX5_SET(qtct_reg, in, tclass, prio_tc[i]); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_QTCT, 0, 1); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc); + +static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), + MLX5_REG_QETCR, 0, 1); +} + +static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, + int outlen) +{ + u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + memset(in, 0, sizeof(in)); + return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, + MLX5_REG_QETCR, 0, 0); +} + +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + int i; + + memset(in, 0, sizeof(in)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1); + MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); + +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + int i; + + memset(in, 0, sizeof(in)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1); + MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); + +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_units) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + void *ets_tcn_conf; + int i; + + memset(in, 0, sizeof(in)); + + MLX5_SET(qetc_reg, in, port_number, 1); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]); + + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1); + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units, + max_bw_units[i]); + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value, + max_bw_value[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit); + +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_units) +{ + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; + void *ets_tcn_conf; + int err; + int i; + + err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); + if (err) + return err; + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]); + + max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, + max_bw_value); + max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, + max_bw_units); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); + +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) +{ + u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]; + u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + 
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); + MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); + MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), + out, sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_wol); + +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) +{ + u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]; + u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), + out, sizeof(out)); + + if (!err) + *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_wol); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index eb05c845ece9..8ba080e441a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -226,7 +226,8 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) return 0; } -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc) { phys_addr_t pfn; phys_addr_t uar_bar_start; @@ -240,20 +241,26 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) uar_bar_start = pci_resource_start(mdev->pdev, 0); pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) { - mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); - err = -ENOMEM; - goto err_free_uar; - } - if (mdev->priv.bf_mapping) - uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping, - uar->index << PAGE_SHIFT); + if (map_wc) { + uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->bf_map) { + mlx5_core_warn(mdev, "ioremap_wc() failed\n"); + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) + goto err_free_uar; + } + } else { + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) + goto err_free_uar; + } return 0; err_free_uar: + mlx5_core_warn(mdev, "ioremap() failed\n"); + err = -ENOMEM; mlx5_cmd_free_uar(mdev, uar->index); return err; @@ -262,8 +269,8 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { - io_mapping_unmap(uar->bf_map); iounmap(uar->map); + iounmap(uar->bf_map); mlx5_cmd_free_uar(mdev, uar->index); } EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c7398b95aecd..bd518405859e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -850,3 +850,111 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev) return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); } EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); + +int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in); + int is_group_manager; + void *in; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + in = mlx5_vzalloc(in_sz); + if (!in) { + err = -ENOMEM; + return err; + } + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + if 
(other_vport) { + if (is_group_manager) { + MLX5_SET(query_vport_counter_in, in, other_vport, 1); + MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1); + } else { + err = -EPERM; + goto free; + } + } + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_vport_counter_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto free; + err = mlx5_cmd_status_to_err_v2(out); + +free: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); + +int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + int vf, + struct mlx5_hca_vport_context *req) +{ + int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in); + u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)]; + int is_group_manager; + void *in; + int err; + void *ctx; + + mlx5_core_dbg(dev, "vf %d\n", vf); + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + memset(out, 0, sizeof(out)); + MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf); + } else { + err = -EPERM; + goto ex; + } + } + + if (MLX5_CAP_GEN(dev, num_ports) > 1) + MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num); + + ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context); + MLX5_SET(hca_vport_context, ctx, field_select, req->field_select); + MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware); + MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi); + MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw); + MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy); + MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state); + MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state); + MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); + MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); + MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); + MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm); + MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2); + MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm); + MLX5_SET(hca_vport_context, ctx, lid, req->lid); + MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply); + MLX5_SET(hca_vport_context, ctx, lmc, req->lmc); + MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout); + MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid); + MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl); + MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter); + MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + if (err) + goto ex; + + err = mlx5_cmd_status_to_err_v2(out); + +ex: + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c new file mode 100644 index 000000000000..9f10df25f3cd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#include "vxlan.h" + +void mlx5e_vxlan_init(struct mlx5e_priv *priv) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + + spin_lock_init(&vxlan_db->lock); + INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC); +} + +static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + struct mlx5_outbox_hdr *hdr; + int err; + + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]; + u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(add_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); + MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + hdr = (struct mlx5_outbox_hdr *)out; + return hdr->status ? 
-ENOMEM : 0; +} + +static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]; + u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)]; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); + MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); +} + +struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + + spin_lock(&vxlan_db->lock); + vxlan = radix_tree_lookup(&vxlan_db->tree, port); + spin_unlock(&vxlan_db->lock); + + return vxlan; +} + +int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + int err; + + err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); + if (err) + return err; + + vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); + if (!vxlan) { + err = -ENOMEM; + goto err_delete_port; + } + + vxlan->udp_port = port; + + spin_lock_irq(&vxlan_db->lock); + err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); + spin_unlock_irq(&vxlan_db->lock); + if (err) + goto err_free; + + return 0; + +err_free: + kfree(vxlan); +err_delete_port: + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + return err; +} + +static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + + spin_lock_irq(&vxlan_db->lock); + vxlan = radix_tree_delete(&vxlan_db->tree, port); + spin_unlock_irq(&vxlan_db->lock); + + if (!vxlan) + return; + + mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); + + kfree(vxlan); +} + +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) +{ + if (!mlx5e_vxlan_lookup_port(priv, port)) + return; + + __mlx5e_vxlan_core_del_port(priv, port); +} + +void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + unsigned int port = 0; + + spin_lock_irq(&vxlan_db->lock); + while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { + port = vxlan->udp_port; + spin_unlock_irq(&vxlan_db->lock); + __mlx5e_vxlan_core_del_port(priv, (u16)port); + spin_lock_irq(&vxlan_db->lock); + } + spin_unlock_irq(&vxlan_db->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h new file mode 100644 index 000000000000..a01685056ab1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __MLX5_VXLAN_H__ +#define __MLX5_VXLAN_H__ + +#include <linux/mlx5/driver.h> +#include "en.h" + +struct mlx5e_vxlan { + u16 udp_port; +}; + +static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) +{ + return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && + mlx5_core_is_pf(mdev)); +} + +void mlx5e_vxlan_init(struct mlx5e_priv *priv); +int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); +struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); + +#endif /* __MLX5_VXLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index ce26adcb4988..2ad7f67854d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -4,6 +4,7 @@ config MLXSW_CORE tristate "Mellanox Technologies Switch ASICs support" + depends on MAY_USE_DEVLINK ---help--- This driver supports Mellanox Technologies Switch ASICs family. 
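The core.c hunks that follow hook the mlxsw switch core into devlink: the core structure is now allocated through devlink_alloc(), registered with devlink_register(), and exposes port_split/port_unsplit callbacks through a devlink_ops table. Below is a minimal, illustrative sketch of that registration pattern, assuming only the devlink calls that appear in these hunks; the example_* names are hypothetical and are not part of the mlxsw driver.

/*
 * Illustrative sketch only: it mirrors the devlink registration pattern
 * used in the core.c hunks below, but the example_* names are hypothetical
 * and do not exist in the mlxsw driver.
 */
#include <linux/errno.h>
#include <linux/device.h>
#include <net/devlink.h>

struct example_core {
	int dummy;		/* driver-private state lives behind devlink */
};

static int example_devlink_port_split(struct devlink *devlink,
				      unsigned int port_index,
				      unsigned int count)
{
	/* a real driver would forward to its port-split handler here */
	return -EOPNOTSUPP;
}

static int example_devlink_port_unsplit(struct devlink *devlink,
					unsigned int port_index)
{
	return -EOPNOTSUPP;
}

static const struct devlink_ops example_devlink_ops = {
	.port_split	= example_devlink_port_split,
	.port_unsplit	= example_devlink_port_unsplit,
};

static int example_core_register(struct device *dev)
{
	struct devlink *devlink;
	struct example_core *core;
	int err;

	/* devlink_alloc() reserves the driver-private area that
	 * devlink_priv() later returns, replacing a plain kzalloc().
	 */
	devlink = devlink_alloc(&example_devlink_ops, sizeof(*core));
	if (!devlink)
		return -ENOMEM;
	core = devlink_priv(devlink);
	core->dummy = 0;

	err = devlink_register(devlink, dev);
	if (err) {
		devlink_free(devlink);
		return err;
	}
	return 0;
}

Teardown in the unregister hunk mirrors this: after the driver's fini callback, devlink_unregister() runs, and devlink_free() replaces the old kfree() of the core structure.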
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 22379eb8e924..f69f6280519f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -56,6 +56,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/byteorder.h> +#include <net/devlink.h> #include "core.h" #include "item.h" @@ -784,6 +785,38 @@ static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) debugfs_remove_recursive(mlxsw_core->dbg_dir); } +static int mlxsw_devlink_port_split(struct devlink *devlink, + unsigned int port_index, + unsigned int count) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (port_index >= MLXSW_PORT_MAX_PORTS) + return -EINVAL; + if (!mlxsw_core->driver->port_split) + return -EOPNOTSUPP; + return mlxsw_core->driver->port_split(mlxsw_core->driver_priv, + port_index, count); +} + +static int mlxsw_devlink_port_unsplit(struct devlink *devlink, + unsigned int port_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (port_index >= MLXSW_PORT_MAX_PORTS) + return -EINVAL; + if (!mlxsw_core->driver->port_unsplit) + return -EOPNOTSUPP; + return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv, + port_index); +} + +static const struct devlink_ops mlxsw_devlink_ops = { + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, +}; + int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const struct mlxsw_bus *mlxsw_bus, void *bus_priv) @@ -791,6 +824,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; struct mlxsw_driver *mlxsw_driver; + struct devlink *devlink; size_t alloc_size; int err; @@ -798,12 +832,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (!mlxsw_driver) return -EINVAL; alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; - mlxsw_core = kzalloc(alloc_size, GFP_KERNEL); - if (!mlxsw_core) { + devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); + if (!devlink) { err = -ENOMEM; - goto err_core_alloc; + goto err_devlink_alloc; } + mlxsw_core = devlink_priv(devlink); INIT_LIST_HEAD(&mlxsw_core->rx_listener_list); INIT_LIST_HEAD(&mlxsw_core->event_listener_list); mlxsw_core->driver = mlxsw_driver; @@ -841,6 +876,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_hwmon_init; + err = devlink_register(devlink, mlxsw_bus_info->dev); + if (err) + goto err_devlink_register; + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, mlxsw_bus_info); if (err) @@ -855,6 +894,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core->driver_priv); err_driver_init: + devlink_unregister(devlink); +err_devlink_register: err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: @@ -864,8 +905,8 @@ err_bus_init: err_alloc_lag_mapping: free_percpu(mlxsw_core->pcpu_stats); err_alloc_stats: - kfree(mlxsw_core); -err_core_alloc: + devlink_free(devlink); +err_devlink_alloc: mlxsw_core_driver_put(device_kind); return err; } @@ -874,14 +915,16 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register); void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) { const char *device_kind = mlxsw_core->bus_info->device_kind; + struct devlink *devlink = priv_to_devlink(mlxsw_core); 
mlxsw_core_debugfs_fini(mlxsw_core); mlxsw_core->driver->fini(mlxsw_core->driver_priv); + devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); kfree(mlxsw_core->lag.mapping); free_percpu(mlxsw_core->pcpu_stats); - kfree(mlxsw_core); + devlink_free(devlink); mlxsw_core_driver_put(device_kind); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index a01723600f0a..c73d1c0792a6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -186,6 +186,8 @@ struct mlxsw_driver { int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info); void (*fini)(void *driver_priv); + int (*port_split)(void *driver_priv, u8 local_port, unsigned int count); + int (*port_unsplit)(void *driver_priv, u8 local_port); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c071077aafbd..7f4173c8eda3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) { int index = q->producer_counter & (q->count - 1); - if ((q->producer_counter - q->consumer_counter) == q->count) + if ((u16) (q->producer_counter - q->consumer_counter) == q->count) return NULL; return mlxsw_pci_queue_elem_info_get(q, index); } @@ -1681,11 +1681,18 @@ static const struct mlxsw_bus mlxsw_pci_bus = { static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) { + unsigned long end; + mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); - /* Current firware does not let us know when the reset is done. - * So we just wait here for constant time and hope for the best. 
- */ - msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + wmb(); /* reset needs to be written before we read control register */ + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + do { + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); + + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) + break; + cond_resched(); + } while (time_before(jiffies, end)); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 912106054ff2..d942a3e6fa41 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -61,6 +61,9 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_FW_READY 0xA1844 +#define MLXSW_PCI_FW_READY_MASK 0xFF +#define MLXSW_PCI_FW_READY_MAGIC 0x5E #define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 #define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index 726f5435b32f..f33b997f2b61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -49,7 +49,7 @@ #define MLXSW_PORT_MID 0xd000 #define MLXSW_PORT_MAX_PHY_PORTS 0x40 -#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS +#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) #define MLXSW_PORT_DEVID_BITS_OFFSET 10 #define MLXSW_PORT_PHY_BITS_OFFSET 4 @@ -59,6 +59,8 @@ #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) +#define MLXSW_PORT_MODULE_MAX_WIDTH 4 + enum mlxsw_port_admin_status { MLXSW_PORT_ADMIN_STATUS_UP = 1, MLXSW_PORT_ADMIN_STATUS_DOWN = 2, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index bb77e2207804..ffe4c0305733 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, } } +/* SPAFT - Switch Port Acceptable Frame Types + * ------------------------------------------ + * The Switch Port Acceptable Frame Types register configures the frame + * admittance of the port. + */ +#define MLXSW_REG_SPAFT_ID 0x2010 +#define MLXSW_REG_SPAFT_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_spaft = { + .id = MLXSW_REG_SPAFT_ID, + .len = MLXSW_REG_SPAFT_LEN, +}; + +/* reg_spaft_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is not supported (all tag types are allowed). + */ +MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8); + +/* reg_spaft_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8); + +/* reg_spaft_allow_untagged + * When set, untagged frames on the ingress are allowed (default). + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1); + +/* reg_spaft_allow_prio_tagged + * When set, priority tagged frames on the ingress are allowed (default). + * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1); + +/* reg_spaft_allow_tagged + * When set, tagged frames on the ingress are allowed (default). 
+ * Access: RW + */ +MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1); + +static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, + bool allow_untagged) +{ + MLXSW_REG_ZERO(spaft, payload); + mlxsw_reg_spaft_local_port_set(payload, local_port); + mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged); + mlxsw_reg_spaft_allow_prio_tagged_set(payload, true); + mlxsw_reg_spaft_allow_tagged_set(payload, true); +} + /* SFGC - Switch Flooding Group Configuration * ------------------------------------------ * The following register controls the association of flooding tables and MIDs @@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SPVID"; case MLXSW_REG_SPVM_ID: return "SPVM"; + case MLXSW_REG_SPAFT_ID: + return "SPAFT"; case MLXSW_REG_SFGC_ID: return "SFGC"; case MLXSW_REG_SFTR_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 217856bdd400..4afbc3e9e381 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,6 +49,7 @@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> +#include <net/devlink.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -304,21 +305,47 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); } -static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, - bool *p_usable) +static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; int err; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); if (err) return err; - *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? 
true : false; + *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); return 0; } +static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 module, u8 width, u8 lane) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int i; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + mlxsw_reg_pmlp_width_set(pmlp_pl, width); + for (i = 0; i < width; i++) { + mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); + mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); +} + +static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + mlxsw_reg_pmlp_width_set(pmlp_pl, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); +} + static int mlxsw_sp_port_open(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1273,6 +1300,18 @@ static u32 mlxsw_sp_to_ptys_speed(u32 speed) return ptys_proto; } +static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + } + return ptys_proto; +} + static int mlxsw_sp_port_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1349,11 +1388,27 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .set_settings = mlxsw_sp_port_set_settings, }; -static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static int +mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_admin; + + eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, + eth_proto_admin); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); +} + +static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port *mlxsw_sp_port; + struct devlink_port *devlink_port; struct net_device *dev; - bool usable; size_t bytes; int err; @@ -1364,6 +1419,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; + mlxsw_sp_port->split = split; bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); if (!mlxsw_sp_port->active_vlans) { @@ -1404,17 +1460,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) */ dev->hard_header_len += MLXSW_TXHDR_LEN; - err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); + devlink_port = &mlxsw_sp_port->devlink_port; + if (mlxsw_sp_port->split) + devlink_port_split_set(devlink_port, module); + err = devlink_port_register(devlink, devlink_port, local_port); if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n", - mlxsw_sp_port->local_port); - goto err_port_module_check; - } - - if (!usable) { - dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n", + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n", 
mlxsw_sp_port->local_port); - goto port_not_usable; + goto err_devlink_port_register; } err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); @@ -1431,6 +1484,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) goto err_port_swid_set; } + err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", + mlxsw_sp_port->local_port); + goto err_port_speed_by_width_set; + } + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", @@ -1457,6 +1517,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) goto err_register_netdev; } + devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); if (err) goto err_port_vlan_init; @@ -1470,10 +1532,11 @@ err_register_netdev: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: +err_port_speed_by_width_set: err_port_swid_set: err_port_system_port_mapping_set: -port_not_usable: -err_port_module_check: + devlink_port_unregister(&mlxsw_sp_port->devlink_port); +err_devlink_port_register: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: @@ -1485,6 +1548,28 @@ err_port_active_vlans_alloc: return err; } +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width, u8 lane) +{ + int err; + + err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, + lane); + if (err) + return err; + + err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, + width); + if (err) + goto err_port_create; + + return 0; + +err_port_create: + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); + return err; +} + static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; @@ -1505,12 +1590,19 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct devlink_port *devlink_port; if (!mlxsw_sp_port) return; + mlxsw_sp->ports[local_port] = NULL; + devlink_port = &mlxsw_sp_port->devlink_port; + devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + devlink_port_unregister(devlink_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); + mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); @@ -1529,6 +1621,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { size_t alloc_size; + u8 module, width; int i; int err; @@ -1538,19 +1631,158 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) return -ENOMEM; for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { - err = mlxsw_sp_port_create(mlxsw_sp, i); + err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, + &width); + if (err) + goto err_port_module_info_get; + if (!width) + continue; + mlxsw_sp->port_to_module[i] = module; + err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); if (err) goto err_port_create; } return 0; err_port_create: +err_port_module_info_get: for (i--; i >= 1; i--) 
mlxsw_sp_port_remove(mlxsw_sp, i); kfree(mlxsw_sp->ports); return err; } +static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) +{ + u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; + + return local_port - offset; +} + +static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port; + u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; + u8 module, cur_width, base_port; + int i; + int err; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + + if (count != 2 && count != 4) { + netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, + &cur_width); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); + return err; + } + + if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { + netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); + return -EINVAL; + } + + /* Make sure we have enough slave (even) ports for the split. */ + if (count == 2) { + base_port = local_port; + if (mlxsw_sp->ports[base_port + 1]) { + netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + return -EINVAL; + } + } else { + base_port = mlxsw_sp_cluster_base_port_get(local_port); + if (mlxsw_sp->ports[base_port + 1] || + mlxsw_sp->ports[base_port + 3]) { + netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + return -EINVAL; + } + } + + for (i = 0; i < count; i++) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + + for (i = 0; i < count; i++) { + err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, + module, width, i * width); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); + goto err_port_create; + } + } + + return 0; + +err_port_create: + for (i--; i >= 0; i--) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + for (i = 0; i < count / 2; i++) { + module = mlxsw_sp->port_to_module[base_port + i * 2]; + mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, + module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); + } + return err; +} + +static int mlxsw_sp_port_unsplit(void *priv, u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port; + u8 module, cur_width, base_port; + unsigned int count; + int i; + int err; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + + if (!mlxsw_sp_port->split) { + netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, + &cur_width); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); + return err; + } + count = cur_width == 1 ? 4 : 2; + + base_port = mlxsw_sp_cluster_base_port_get(local_port); + + /* Determine which ports to remove. 
*/ + if (count == 2 && local_port >= base_port + 2) + base_port = base_port + 2; + + for (i = 0; i < count; i++) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + + for (i = 0; i < count / 2; i++) { + module = mlxsw_sp->port_to_module[base_port + i * 2]; + err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, + module, MLXSW_PORT_MODULE_MAX_WIDTH, + 0); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); + } + + return 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -1974,6 +2206,8 @@ static struct mlxsw_driver mlxsw_sp_driver = { .priv_size = sizeof(struct mlxsw_sp), .init = mlxsw_sp_init, .fini = mlxsw_sp_fini, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, .txhdr_construct = mlxsw_sp_txhdr_construct, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp_config_profile, @@ -2123,6 +2357,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2356,9 +2592,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - - if (lag->ref_count == 1) - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); } if (lag->ref_count == 1) { @@ -2746,6 +2980,13 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_vport_flood_set; } + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + netdev_err(dev, "Failed to set STP state\n"); + goto err_port_stp_state_set; + } + if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) netdev_err(dev, "Failed to flush FDB\n"); @@ -2763,6 +3004,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, return 0; +err_port_stp_state_set: err_vport_flood_set: err_port_vid_learning_set: err_port_vid_to_fid_validate: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 7f42eb1c320e..4b8abaf06321 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -43,6 +43,7 @@ #include <linux/if_vlan.h> #include <linux/list.h> #include <net/switchdev.h> +#include <net/devlink.h> #include "port.h" #include "core.h" @@ -57,6 +58,10 @@ #define MLXSW_SP_MID_MAX 7000 +#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 + +#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -118,10 +123,13 @@ struct mlxsw_sp { #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100 unsigned int interval; /* ms */ } fdb_notify; +#define MLXSW_SP_MIN_AGEING_TIME 10 +#define MLXSW_SP_MAX_AGEING_TIME 1000000 #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; + u8 port_to_module[MLXSW_PORT_MAX_PORTS]; }; static inline struct mlxsw_sp_upper * @@ -149,7 +157,8 @@ struct mlxsw_sp_port { learning_sync:1, uc_flood:1, bridged:1, - lagged:1; + lagged:1, + split:1; u16 pvid; u16 lag_id; struct { @@ -162,6 +171,7 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ 
struct list_head vports_list; + struct devlink_port devlink_port; }; static inline struct mlxsw_sp_port * @@ -254,5 +264,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index e492ca2cdecd..e1c74efff51a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; - if (switchdev_trans_ph_prepare(trans)) - return 0; + if (switchdev_trans_ph_prepare(trans)) { + if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || + ageing_time > MLXSW_SP_MAX_AGEING_TIME) + return -ERANGE; + else + return 0; + } return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } @@ -370,7 +375,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } -static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char spvid_pl[MLXSW_REG_SPVID_LEN]; @@ -379,6 +385,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } +static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool allow) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spaft_pl[MLXSW_REG_SPAFT_LEN]; + + mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); +} + +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + if (!vid) { + err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); + if (err) { + netdev_err(dev, "Failed to disallow untagged traffic\n"); + return err; + } + } else { + err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + if (err) { + netdev_err(dev, "Failed to set PVID\n"); + return err; + } + + /* Only allow if not already allowed. 
*/ + if (!mlxsw_sp_port->pvid) { + err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, + true); + if (err) { + netdev_err(dev, "Failed to allow untagged traffic\n"); + goto err_port_allow_untagged_set; + } + } + } + + mlxsw_sp_port->pvid = vid; + return 0; + +err_port_allow_untagged_set: + __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); + return err; +} + static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) { char sfmr_pl[MLXSW_REG_SFMR_LEN]; @@ -540,7 +593,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to add PVID %d\n", vid_begin); goto err_port_pvid_set; } - mlxsw_sp_port->pvid = vid_begin; + } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); + if (err) { + netdev_err(dev, "Unable to del PVID\n"); + goto err_port_pvid_set; + } } /* Changing activity bits only if HW operation succeded */ @@ -892,20 +950,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return err; } + if (init) + goto out; + pvid = mlxsw_sp_port->pvid; - if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { - /* Default VLAN is always 1 */ - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + if (pvid >= vid_begin && pvid <= vid_end) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); if (err) { netdev_err(dev, "Unable to del PVID %d\n", pvid); return err; } - mlxsw_sp_port->pvid = 1; } - if (init) - goto out; - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false, false); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index d85960cfb694..7a60a26759b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -43,6 +43,7 @@ #include <linux/device.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> +#include <net/devlink.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -78,6 +79,7 @@ struct mlxsw_sx_port { struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sx *mlxsw_sx; u8 local_port; + struct devlink_port devlink_port; }; /* tx_hdr_version @@ -953,7 +955,9 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) { + struct devlink *devlink = priv_to_devlink(mlxsw_sx->core); struct mlxsw_sx_port *mlxsw_sx_port; + struct devlink_port *devlink_port; struct net_device *dev; bool usable; int err; @@ -1007,6 +1011,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto port_not_usable; } + devlink_port = &mlxsw_sx_port->devlink_port; + err = devlink_port_register(devlink, devlink_port, local_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n", + mlxsw_sx_port->local_port); + goto err_devlink_port_register; + } + err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1064,6 +1076,8 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_register_netdev; } + devlink_port_type_eth_set(devlink_port, dev); + mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; @@ -1075,6 +1089,8 @@ err_port_mtu_set: err_port_speed_set: err_port_swid_set: err_port_system_port_mapping_set: + devlink_port_unregister(&mlxsw_sx_port->devlink_port); +err_devlink_port_register: 
port_not_usable: err_port_module_check: err_dev_addr_get: @@ -1087,11 +1103,15 @@ err_alloc_stats: static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) { struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + struct devlink_port *devlink_port; if (!mlxsw_sx_port) return; + devlink_port = &mlxsw_sx_port->devlink_port; + devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); + devlink_port_unregister(devlink_port); free_percpu(mlxsw_sx_port->pcpu_stats); free_netdev(mlxsw_sx_port->dev); } diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 09d2e16fd6b0..cb0102dd7f70 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -561,8 +561,8 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_init_table(sg, 1); sg_dma_address(sg) = dma_map_single(adapter->dev, ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); - err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); - if (unlikely(err)) { + if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) { + err = -ENOMEM; sg_dma_address(sg) = 0; goto out; } @@ -572,8 +572,10 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); - if (!ctl->adesc) + if (!ctl->adesc) { + err = -ENOMEM; goto out; + } ctl->adesc->callback_param = netdev; ctl->adesc->callback = ks8842_dma_rx_cb; @@ -584,7 +586,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) goto out; } - return err; + return 0; out: if (sg_dma_address(sg)) dma_unmap_single(adapter->dev, sg_dma_address(sg), diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 00cfd95ca59d..3e67f451f2ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; priv->base = devm_ioremap_resource(p_dev, res); - ret = IS_ERR(priv->base); - if (ret) { + if (IS_ERR(priv->base)) { dev_err(p_dev, "devm_ioremap_resource failed\n"); + ret = PTR_ERR(priv->base); goto init_fail; } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index afa445842f3e..52d9a94aebb9 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1038,7 +1038,7 @@ static int w90p910_ether_probe(struct platform_device *pdev) error = register_netdev(dev); if (error != 0) { - dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); + dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n"); error = -ENODEV; goto failed_put_rmiiclk; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 75e88f4c1531..9b0d7f463ff3 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5629,12 +5629,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); - init_timer(&np->oom_kick); - np->oom_kick.data = (unsigned long) dev; - np->oom_kick.function = nv_do_rx_refill; /* timer handler */ - init_timer(&np->nic_poll); - np->nic_poll.data = (unsigned long) dev; - 
np->nic_poll.function = nv_do_nic_poll; /* timer handler */ + setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); + setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); init_timer_deferrable(&np->stats_poll); np->stats_poll.data = (unsigned long) dev; np->stats_poll.function = nv_do_stats_poll; /* timer handler */ diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig deleted file mode 100644 index a7aa28054cc1..000000000000 --- a/drivers/net/ethernet/octeon/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# -# Cavium network device configuration -# - -config OCTEON_MGMT_ETHERNET - tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" - depends on CAVIUM_OCTEON_SOC - select PHYLIB - select MDIO_OCTEON - default y - ---help--- - This option enables the ethernet driver for the management - port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, - CN54XX, CN52XX, and CN6XXX chips. diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig index db19c6f49859..7c92e8306c19 100644 --- a/drivers/net/ethernet/pasemi/Kconfig +++ b/drivers/net/ethernet/pasemi/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_PASEMI bool "PA Semi devices" default y - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,9 +18,8 @@ if NET_VENDOR_PASEMI config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI select PHYLIB - select INET_LRO ---help--- This driver supports the on-chip 1/10Gbit Ethernet controller on PA Semi's PWRficient line of chips. diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 57a6e6cd74fc..af54df52aa6b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -30,9 +30,7 @@ #include <linux/skbuff.h> #include <linux/ip.h> -#include <linux/tcp.h> #include <net/checksum.h> -#include <linux/inet_lro.h> #include <linux/prefetch.h> #include <asm/irq.h> @@ -52,12 +50,9 @@ * * - Multicast support * - Large MTU support - * - SW LRO * - Multiqueue RX/TX */ -#define LRO_MAX_AGGR 64 - #define PE_MIN_MTU 64 #define PE_MAX_MTU 9000 #define PE_DEF_MTU ETH_DATA_LEN @@ -257,37 +252,6 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) return 0; } -static int get_skb_hdr(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *data) -{ - u64 macrx = (u64) data; - unsigned int ip_len; - struct iphdr *iph; - - /* IPv4 header checksum failed */ - if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) - return -1; - - /* non tcp packet */ - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - /* check if ip header and tcp header are complete */ - if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) - return -1; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - - return 0; -} - static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, const int nfrags, struct sk_buff *skb, @@ -817,7 +781,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, skb_put(skb, len-4); skb->protocol = eth_type_trans(skb, mac->netdev); - lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); + napi_gro_receive(&mac->napi, skb); next: RX_DESC(rx, n) = 0; @@ -839,8 +803,6 @@ next: rx_ring(mac)->next_to_clean = n; - 
lro_flush_all(&mac->lro_mgr); - /* Increase is in number of 16-byte entries, and since each descriptor * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with * count*2. @@ -1754,16 +1716,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_GSO; - mac->lro_mgr.max_aggr = LRO_MAX_AGGR; - mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; - mac->lro_mgr.lro_arr = mac->lro_desc; - mac->lro_mgr.get_skb_header = get_skb_hdr; - mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - mac->lro_mgr.dev = mac->netdev; - mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; - - mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!mac->dma_pdev) { dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index a5807703ab96..161c99a98403 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -31,7 +31,6 @@ #define CS_RING_SIZE (TX_RING_SIZE*2) -#define MAX_LRO_DESCRIPTORS 8 #define MAX_CS 2 struct pasemi_mac_txring { @@ -84,10 +83,7 @@ struct pasemi_mac { u8 mac_addr[ETH_ALEN]; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; struct timer_list rxtimer; - unsigned int lro_max_aggr; struct pasemi_mac_txring *tx; struct pasemi_mac_rxring *rx; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index 25fae568261f..f046bfc18e7d 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -20,7 +20,6 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> -#include <linux/inet_lro.h> #include <asm/pasemi_dma.h> #include "pasemi_mac.h" diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 6409a06bbdf6..fd362b6923f4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2891,7 +2891,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; @@ -2919,7 +2919,7 @@ netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; @@ -2960,7 +2960,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; @@ -2981,7 +2981,7 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = 
dev_get_drvdata(dev); u64 data; int ret; @@ -3018,7 +3018,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); struct net_device *netdev = adapter->netdev; struct netxen_dimm_cfg dimm; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1292c360390c..fcb8e9ba51d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,7 +26,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.4.0.0" +#define DRV_MODULE_VERSION "8.7.0.0" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 @@ -70,8 +70,8 @@ struct qed_sb_sp_info; struct qed_mcp_info; struct qed_rt_data { - u32 init_val; - bool b_valid; + u32 *init_val; + bool *b_valid; }; /* The PCI personality is not quite synonymous to protocol ID: @@ -120,6 +120,10 @@ enum QED_PORT_MODE { QED_PORT_MODE_DE_1X25G }; +enum qed_dev_cap { + QED_DEV_CAP_ETH, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -142,15 +146,13 @@ struct qed_hw_info { u16 ovlan; u32 part_num[4]; - u32 vendor_id; - u32 device_id; - unsigned char hw_mac_addr[ETH_ALEN]; struct qed_igu_info *p_igu_info; u32 port_mode; u32 hw_mode; + unsigned long device_capabilities; }; struct qed_hw_cid_data { @@ -267,7 +269,7 @@ struct qed_hwfn { struct qed_hw_info hw_info; /* rt_array (for init-tool) */ - struct qed_rt_data *rt_data; + struct qed_rt_data rt_data; /* SPQ */ struct qed_spq *p_spq; @@ -301,6 +303,9 @@ struct qed_hwfn { bool b_int_enabled; bool b_int_requested; + /* True if the driver requests for the link */ + bool b_drv_link_init; + struct qed_mcp_info *mcp_info; struct qed_hw_cid_data *p_tx_cids; @@ -350,9 +355,20 @@ struct qed_dev { char name[NAME_SIZE]; u8 type; -#define QED_DEV_TYPE_BB_A0 (0 << 0) -#define QED_DEV_TYPE_MASK (0x3) -#define QED_DEV_TYPE_SHIFT (0) +#define QED_DEV_TYPE_BB (0 << 0) +#define QED_DEV_TYPE_AH BIT(0) +/* Translate type/revision combo into the proper conditions */ +#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) +#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_A0(dev)) +#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_B0(dev)) + +#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \ + QED_IS_BB_B0(dev) ? 
CHIP_BB_B0 : CHIP_K2) + + u16 vendor_id; + u16 device_id; u16 chip_num; #define CHIP_NUM_MASK 0xffff @@ -361,6 +377,8 @@ struct qed_dev { u16 chip_rev; #define CHIP_REV_MASK 0xf #define CHIP_REV_SHIFT 12 +#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev) +#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) u16 chip_metal; #define CHIP_METAL_MASK 0xff @@ -375,10 +393,10 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum mf_mode mf_mode; -#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN) + enum qed_mf_mode mf_mode; +#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) +#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) int pcie_width; int pcie_speed; @@ -441,11 +459,6 @@ struct qed_dev { const struct firmware *firmware; }; -#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \ - QED_DEV_TYPE_SHIFT) -#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0) -#define QED_IS_BB(dev) (QED_IS_BB_A0(dev)) - #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7ccdb46c6764..fc767c07a264 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr; u32 i; - p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC); + p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL); if (!p_mngr) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); return -ENOMEM; @@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.num_pf_cids = iids.cids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs; - params.start_vport = qm_info->num_vports; + params.start_vport = qm_info->start_vport; + params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; params.pq_params = qm_info->qm_pq_params; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 817bbd5476ff..b7d100f6bd6f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -32,6 +32,33 @@ #include "qed_sp.h" /* API common to all protocols */ +enum BAR_ID { + BAR_ID_0, /* used for GRC */ + BAR_ID_1 /* Used for doorbells */ +}; + +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + enum BAR_ID bar_id) +{ + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + + if (val) + return 1 << (val + 15); + + /* Old MFW initialized above registered only conditionally */ + if (p_hwfn->cdev->num_hwfns > 1) { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); + return BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } else { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); + return 512 * 1024; + } +} + void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) { @@ -134,17 +161,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. 
*/ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * - num_pqs, GFP_ATOMIC); + num_pqs, GFP_KERNEL); if (!qm_info->qm_pq_params) goto alloc_err; qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * - num_vports, GFP_ATOMIC); + num_vports, GFP_KERNEL); if (!qm_info->qm_vport_params) goto alloc_err; qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * - MAX_NUM_PORTS, GFP_ATOMIC); + MAX_NUM_PORTS, GFP_KERNEL); if (!qm_info->qm_port_params) goto alloc_err; @@ -341,11 +368,6 @@ void qed_resc_setup(struct qed_dev *cdev) } } -#define FINAL_CLEANUP_CMD_OFFSET (0) -#define FINAL_CLEANUP_CMD (0x1) -#define FINAL_CLEANUP_VALID_OFFSET (6) -#define FINAL_CLEANUP_VFPF_ID_SHIFT (7) -#define FINAL_CLEANUP_COMP (0x2) #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, @@ -355,12 +377,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET; + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET; - command |= 1 << FINAL_CLEANUP_VALID_OFFSET; - command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT; - command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT; + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { @@ -396,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) { int hw_mode = 0; - hw_mode = (1 << MODE_BB_A0); + hw_mode = (1 << MODE_BB_B0); switch (p_hwfn->cdev->num_ports_in_engines) { case 1: @@ -415,18 +439,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) } switch (p_hwfn->cdev->mf_mode) { - case SF: - hw_mode |= 1 << MODE_SF; + case QED_MF_DEFAULT: + case QED_MF_NPAR: + hw_mode |= 1 << MODE_MF_SI; break; - case MF_OVLAN: + case QED_MF_OVLAN: hw_mode |= 1 << MODE_MF_SD; break; - case MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n"); - hw_mode |= 1 << MODE_SF; + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + hw_mode |= 1 << MODE_MF_SI; } hw_mode |= 1 << MODE_ASIC; @@ -655,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data) { - struct qed_storm_stats *p_stat; - u32 load_code, param, *p_address; + u32 load_code, param; int rc, mfw_rc, i; - u8 fw_vport = 0; rc = qed_init_fw_data(cdev, bin_fw_data); if (rc != 0) @@ -667,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - rc = qed_fw_vport(p_hwfn, 0, &fw_vport); - if (rc != 0) - return rc; - /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -734,35 +750,60 @@ int qed_hw_init(struct qed_dev *cdev, } p_hwfn->hw_init_done = true; + } + + return 0; +} - /* init PF stats */ - p_stat = &p_hwfn->storm_stats; - p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM + - MSTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); +#define QED_HW_STOP_RETRY_LIMIT (10) +static inline void qed_hw_timers_stop(struct qed_dev 
*cdev, + struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + int i; - p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM + - USTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + /* close timers */ + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); - p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM + - PSTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK))) + break; - p_address = &p_stat->tstats.address; - *p_address = BAR0_MAP_REG_TSDM_RAM + - TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); - p_stat->tstats.len = sizeof(struct tstorm_per_port_stat); + /* Dependent on number of connection/tasks, possibly + * 1ms sleep is required between polls + */ + usleep_range(1000, 2000); } - return 0; + if (i < QED_HW_STOP_RETRY_LIMIT) + return; + + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); +} + +void qed_hw_timers_stop_all(struct qed_dev *cdev) +{ + int j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); + } } -#define QED_HW_STOP_RETRY_LIMIT (10) int qed_hw_stop(struct qed_dev *cdev) { int rc = 0, t_rc; - int i, j; + int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; @@ -775,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev) rc = qed_sp_pf_stop(p_hwfn); if (rc) - return rc; + DP_NOTICE(p_hwfn, + "Failed to close PF against FW. 
Continue to stop HW to prevent illegal host access by the device\n"); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); @@ -786,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { - if ((!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) - break; - - usleep_range(1000, 2000); - } - if (i == QED_HW_STOP_RETRY_LIMIT) - DP_NOTICE(p_hwfn, - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN), - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK)); + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); /* Disable Attention Generation */ qed_int_igu_disable_int(p_hwfn, p_ptt); @@ -832,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev) void qed_hw_stop_fastpath(struct qed_dev *cdev) { - int i, j; + int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; @@ -851,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { - if ((!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) - break; - - usleep_range(1000, 2000); - } - if (i == QED_HW_STOP_RETRY_LIMIT) - DP_NOTICE(p_hwfn, - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN), - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK)); - qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); /* Need to wait 1ms to guarantee SBs are cleared */ @@ -954,18 +960,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) } /* Setup bar access */ -static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) +static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) { - int rc; - - /* Allocate PTT pool */ - rc = qed_ptt_pool_alloc(p_hwfn); - if (rc) - return rc; - - /* Allocate the main PTT */ - p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); - /* clear indirect access */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); @@ -980,8 +976,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - - return 0; } static void get_function_id(struct qed_hwfn *p_hwfn) @@ -1016,14 +1010,17 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; u32 *resc_num = p_hwfn->hw_info.resc_num; + struct qed_sb_cnt_info sb_cnt_info; int num_funcs, i; - num_funcs = IS_MF(p_hwfn) ? 
MAX_NUM_PFS_BB - : p_hwfn->cdev->num_ports_in_engines; + num_funcs = MAX_NUM_PFS_BB; + + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); resc_num[QED_SB] = min_t(u32, (MAX_SB_PER_PATH_BB / num_funcs), - qed_int_get_num_sbs(p_hwfn, NULL)); + sb_cnt_info.sb_cnt); resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; @@ -1071,7 +1068,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; - u32 port_cfg_addr, link_temp, val, nvm_cfg_addr; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1086,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); - /* Read Vendor Id / Device Id */ - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, glob) + - offsetof(struct nvm_cfg1_glob, pci_id); - p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & - NVM_CFG1_GLOB_VENDOR_ID_MASK; - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, core_cfg); @@ -1134,21 +1124,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, break; } - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + - offsetof(struct nvm_cfg1_func, device_id); - val = qed_rd(p_hwfn, p_ptt, addr); - - if (IS_MF(p_hwfn)) { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET; - } else { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET; - } - /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -1220,18 +1195,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = MF_OVLAN; + p_hwfn->cdev->mf_mode = QED_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = MF_NPAR; + p_hwfn->cdev->mf_mode = QED_MF_NPAR; break; - case NVM_CFG1_GLOB_MF_MODE_FORCED_SF: - p_hwfn->cdev->mf_mode = SF; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; break; } DP_INFO(p_hwfn, "Multi function mode is %08x\n", p_hwfn->cdev->mf_mode); + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = qed_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + __set_bit(QED_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } @@ -1291,31 +1276,38 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, return rc; } -static void qed_get_dev_info(struct qed_dev *cdev) +static int qed_get_dev_info(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); u32 tmp; - cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + /* Read Vendor Id / Device Id */ + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, + &cdev->vendor_id); + 
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, + &cdev->device_id); + cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); - cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_REV); MASK_FIELD(CHIP_REV, cdev->chip_rev); + cdev->type = QED_DEV_TYPE_BB; /* Learn number of HW-functions */ - tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); - if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) { + if (tmp & (1 << p_hwfn->rel_pf_id)) { DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); cdev->num_hwfns = 2; } else { cdev->num_hwfns = 1; } - cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_TEST_REG) >> 4; MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); - cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, cdev->chip_metal); @@ -1323,6 +1315,14 @@ static void qed_get_dev_info(struct qed_dev *cdev) "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", cdev->chip_num, cdev->chip_rev, cdev->chip_bond_id, cdev->chip_metal); + + if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { + DP_NOTICE(cdev->hwfns, + "The chip type/rev (BB A0) is not supported!\n"); + return -EINVAL; + } + + return 0; } static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, @@ -1345,15 +1345,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, get_function_id(p_hwfn); - rc = qed_hw_hwfn_prepare(p_hwfn); + /* Allocate PTT pool */ + rc = qed_ptt_pool_alloc(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n"); goto err0; } + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + /* First hwfn learns basic information, e.g., number of hwfns */ - if (!p_hwfn->my_id) - qed_get_dev_info(p_hwfn->cdev); + if (!p_hwfn->my_id) { + rc = qed_get_dev_info(p_hwfn->cdev); + if (rc != 0) + goto err1; + } + + qed_hw_hwfn_prepare(p_hwfn); /* Initialize MCP structure */ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); @@ -1385,17 +1394,6 @@ err0: return rc; } -static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, - u8 bar_id) -{ - u32 bar_reg = (bar_id == 0 ? 
PGLUE_B_REG_PF_BAR0_SIZE - : PGLUE_B_REG_PF_BAR1_SIZE); - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); - - /* Get the BAR size(in KB) from hardware given val */ - return 1 << (val + 15); -} - int qed_hw_prepare(struct qed_dev *cdev, int personality) { @@ -1420,11 +1418,11 @@ int qed_hw_prepare(struct qed_dev *cdev, u8 __iomem *addr; /* adjust bar offset for second engine */ - addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; + addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2; p_regview = addr; /* adjust doorbell bar offset for second engine */ - addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2; p_doorbell = addr; /* prepare second hw function */ @@ -1536,223 +1534,6 @@ void qed_chain_free(struct qed_dev *cdev, p_chain->p_phys_addr); } -static void __qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) -{ - int i, j; - - memset(stats, 0, sizeof(*stats)); - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct eth_mstorm_per_queue_stat mstats; - struct eth_ustorm_per_queue_stat ustats; - struct eth_pstorm_per_queue_stat pstats; - struct tstorm_per_port_stat tstats; - struct port_stats port_stats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); - - if (!p_ptt) { - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); - continue; - } - - memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_from(p_hwfn, p_ptt, &mstats, - p_hwfn->storm_stats.mstats.address, - p_hwfn->storm_stats.mstats.len); - - memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_from(p_hwfn, p_ptt, &ustats, - p_hwfn->storm_stats.ustats.address, - p_hwfn->storm_stats.ustats.len); - - memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_from(p_hwfn, p_ptt, &pstats, - p_hwfn->storm_stats.pstats.address, - p_hwfn->storm_stats.pstats.len); - - memset(&tstats, 0, sizeof(tstats)); - qed_memcpy_from(p_hwfn, p_ptt, &tstats, - p_hwfn->storm_stats.tstats.address, - p_hwfn->storm_stats.tstats.len); - - memset(&port_stats, 0, sizeof(port_stats)); - - if (p_hwfn->mcp_info) - qed_memcpy_from(p_hwfn, p_ptt, &port_stats, - p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, stats), - sizeof(port_stats)); - qed_ptt_release(p_hwfn, p_ptt); - - stats->no_buff_discards += - HILO_64_REGPAIR(mstats.no_buff_discard); - stats->packet_too_big_discard += - HILO_64_REGPAIR(mstats.packet_too_big_discard); - stats->ttl0_discard += - HILO_64_REGPAIR(mstats.ttl0_discard); - stats->tpa_coalesced_pkts += - HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); - stats->tpa_coalesced_events += - HILO_64_REGPAIR(mstats.tpa_coalesced_events); - stats->tpa_aborts_num += - HILO_64_REGPAIR(mstats.tpa_aborts_num); - stats->tpa_coalesced_bytes += - HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); - - stats->rx_ucast_bytes += - HILO_64_REGPAIR(ustats.rcv_ucast_bytes); - stats->rx_mcast_bytes += - HILO_64_REGPAIR(ustats.rcv_mcast_bytes); - stats->rx_bcast_bytes += - HILO_64_REGPAIR(ustats.rcv_bcast_bytes); - stats->rx_ucast_pkts += - HILO_64_REGPAIR(ustats.rcv_ucast_pkts); - stats->rx_mcast_pkts += - HILO_64_REGPAIR(ustats.rcv_mcast_pkts); - stats->rx_bcast_pkts += - HILO_64_REGPAIR(ustats.rcv_bcast_pkts); - - stats->mftag_filter_discards += - HILO_64_REGPAIR(tstats.mftag_filter_discard); - stats->mac_filter_discards += - HILO_64_REGPAIR(tstats.eth_mac_filter_discard); - - stats->tx_ucast_bytes += - HILO_64_REGPAIR(pstats.sent_ucast_bytes); - stats->tx_mcast_bytes += - HILO_64_REGPAIR(pstats.sent_mcast_bytes); - stats->tx_bcast_bytes += - 
HILO_64_REGPAIR(pstats.sent_bcast_bytes); - stats->tx_ucast_pkts += - HILO_64_REGPAIR(pstats.sent_ucast_pkts); - stats->tx_mcast_pkts += - HILO_64_REGPAIR(pstats.sent_mcast_pkts); - stats->tx_bcast_pkts += - HILO_64_REGPAIR(pstats.sent_bcast_pkts); - stats->tx_err_drop_pkts += - HILO_64_REGPAIR(pstats.error_drop_pkts); - stats->rx_64_byte_packets += port_stats.pmm.r64; - stats->rx_127_byte_packets += port_stats.pmm.r127; - stats->rx_255_byte_packets += port_stats.pmm.r255; - stats->rx_511_byte_packets += port_stats.pmm.r511; - stats->rx_1023_byte_packets += port_stats.pmm.r1023; - stats->rx_1518_byte_packets += port_stats.pmm.r1518; - stats->rx_1522_byte_packets += port_stats.pmm.r1522; - stats->rx_2047_byte_packets += port_stats.pmm.r2047; - stats->rx_4095_byte_packets += port_stats.pmm.r4095; - stats->rx_9216_byte_packets += port_stats.pmm.r9216; - stats->rx_16383_byte_packets += port_stats.pmm.r16383; - stats->rx_crc_errors += port_stats.pmm.rfcs; - stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - stats->rx_pause_frames += port_stats.pmm.rxpf; - stats->rx_pfc_frames += port_stats.pmm.rxpp; - stats->rx_align_errors += port_stats.pmm.raln; - stats->rx_carrier_errors += port_stats.pmm.rfcr; - stats->rx_oversize_packets += port_stats.pmm.rovr; - stats->rx_jabbers += port_stats.pmm.rjbr; - stats->rx_undersize_packets += port_stats.pmm.rund; - stats->rx_fragments += port_stats.pmm.rfrg; - stats->tx_64_byte_packets += port_stats.pmm.t64; - stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - stats->tx_pause_frames += port_stats.pmm.txpf; - stats->tx_pfc_frames += port_stats.pmm.txpp; - stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - stats->tx_total_collisions += port_stats.pmm.tncl; - stats->rx_mac_bytes += port_stats.pmm.rbyte; - stats->rx_mac_uc_packets += port_stats.pmm.rxuca; - stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - stats->tx_mac_bytes += port_stats.pmm.tbyte; - stats->tx_mac_uc_packets += port_stats.pmm.txuca; - stats->tx_mac_mc_packets += port_stats.pmm.txmca; - stats->tx_mac_bc_packets += port_stats.pmm.txbca; - stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; - - for (j = 0; j < 8; j++) { - stats->brb_truncates += port_stats.brb.brb_truncate[j]; - stats->brb_discards += port_stats.brb.brb_discard[j]; - } - } -} - -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) -{ - u32 i; - - if (!cdev) { - memset(stats, 0, sizeof(*stats)); - return; - } - - __qed_get_vport_stats(cdev, stats); - - if (!cdev->reset_stats) - return; - - /* Reduce the statistics baseline */ - for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) - ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; -} - -/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ -void qed_reset_vport_stats(struct qed_dev *cdev) -{ - int i; - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct eth_mstorm_per_queue_stat mstats; - 
struct eth_ustorm_per_queue_stat ustats; - struct eth_pstorm_per_queue_stat pstats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); - - if (!p_ptt) { - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); - continue; - } - - memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.mstats.address, - &mstats, - p_hwfn->storm_stats.mstats.len); - - memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.ustats.address, - &ustats, - p_hwfn->storm_stats.ustats.len); - - memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.pstats.address, - &pstats, - p_hwfn->storm_stats.pstats.len); - - qed_ptt_release(p_hwfn, p_ptt); - } - - /* PORT statistics are not necessarily reset, so we need to - * read and create a baseline for future statistics. - */ - if (!cdev->reset_stats) - DP_INFO(cdev, "Reset stats not allocated\n"); - else - __qed_get_vport_stats(cdev, cdev->reset_stats); -} - int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index e29a3ba6c8b0..d6c7ddf4f4d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -78,6 +78,15 @@ int qed_hw_init(struct qed_dev *cdev, const u8 *bin_fw_data); /** + * @brief qed_hw_timers_stop_all - stop the timers HW block + * + * @param cdev + * + * @return void + */ +void qed_hw_timers_stop_all(struct qed_dev *cdev); + +/** * @brief qed_hw_stop - * * @param cdev @@ -156,8 +165,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); enum qed_dmae_address_type_t { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 264e954675d1..a368f5e71d95 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -34,6 +34,8 @@ enum common_event_opcode { COMMON_EVENT_RESERVED3, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, + COMMON_EVENT_RESERVED6, + COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE }; @@ -45,6 +47,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_RESERVED, COMMON_RAMROD_RESERVED2, COMMON_RAMROD_RESERVED3, + COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -331,6 +334,179 @@ struct xstorm_core_conn_ag_ctx { __le16 word15 /* word15 */; }; +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 
0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* 
word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + /* The core storm context for the Mstorm */ struct mstorm_core_conn_st_ctx { __le32 reserved[24]; @@ -349,8 +525,9 @@ struct core_conn_context { struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; struct xstorm_core_conn_ag_ctx 
xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; - struct regpair mstorm_st_padding[2]; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2] /* padding */; }; @@ -397,10 +574,12 @@ union event_ring_element { }; enum personality_type { + BAD_PERSONALITY_TYP, PERSONALITY_RESERVED, PERSONALITY_RESERVED2, PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, PERSONALITY_RESERVED3, + PERSONALITY_CORE, PERSONALITY_ETH /* Ethernet */, PERSONALITY_RESERVED4, MAX_PERSONALITY_TYPE @@ -570,7 +749,7 @@ enum block_addr { GRCBASE_NWM = 0x800000, GRCBASE_NWS = 0x700000, GRCBASE_MS = 0x6a0000, - GRCBASE_PHY_PCIE = 0x618000, + GRCBASE_PHY_PCIE = 0x620000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -789,19 +968,19 @@ struct igu_msix_vector { enum init_modes { MODE_BB_A0, - MODE_RESERVED, + MODE_BB_B0, MODE_RESERVED2, MODE_ASIC, MODE_RESERVED3, MODE_RESERVED4, MODE_RESERVED5, + MODE_RESERVED6, MODE_SF, MODE_MF_SD, MODE_MF_SI, MODE_PORTS_PER_ENG_1, MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, - MODE_40G, MODE_100G, MODE_EAGLE_ENG1_WORKAROUND, MAX_INIT_MODES @@ -816,43 +995,6 @@ enum init_phases { MAX_INIT_PHASES }; -struct mstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0 /* word0 */; - __le16 word1 /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; -}; - /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -945,6 +1087,17 @@ struct qm_rf_pq_map { #define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define 
SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + /* SDM operation gen command (generate aggregative interrupt) */ struct sdm_op_gen { __le32 command; @@ -956,223 +1109,6 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 20 }; -struct tstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define 
TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 word1 /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; -}; - -struct ustorm_core_conn_ag_ctx { - u8 reserved /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define 
USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 word1 /* word1 */; - __le32 rx_producers /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; -}; - -struct ystorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le16 word1 /* word1 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; - __le16 word4 /* word4 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; -}; - /*********************************** Init ************************************/ /* Width of GRC address in bits (addresses are specified in dwords) */ @@ -1274,13 +1210,6 @@ enum chip_ids { MAX_CHIP_IDS }; -enum idle_chk_severity_types { - IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */, - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - IDLE_CHK_SEVERITY_WARNING, - MAX_IDLE_CHK_SEVERITY_TYPES -}; - struct init_array_raw_hdr { __le32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF @@ -1340,14 +1269,6 @@ struct init_callback_op { __le16 block_id /* Blocks ID */; }; -/* init comparison types */ -enum init_comparison_types { - INIT_COMPARISON_EQ /* init value is included in the init command */, - INIT_COMPARISON_OR /* init value is all zeros */, - 
INIT_COMPARISON_AND /* init value is an array of values */, - MAX_INIT_COMPARISON_TYPES -}; - /* init operation: delay */ struct init_delay_op { __le32 op_data; @@ -1444,12 +1365,10 @@ struct init_read_op { __le32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 -#define INIT_READ_OP_POLL_COMP_MASK 0x7 -#define INIT_READ_OP_POLL_COMP_SHIFT 4 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 #define INIT_READ_OP_RESERVED_MASK 0x1 -#define INIT_READ_OP_RESERVED_SHIFT 7 -#define INIT_READ_OP_POLL_MASK 0x1 -#define INIT_READ_OP_POLL_SHIFT 8 +#define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; @@ -1477,6 +1396,14 @@ enum init_op_types { MAX_INIT_OP_TYPES }; +enum init_poll_types { + INIT_POLL_NONE /* No polling */, + INIT_POLL_EQ /* init value is included in the init command */, + INIT_POLL_OR /* init value is all zeros */, + INIT_POLL_AND /* init value is an array of values */, + MAX_INIT_POLL_TYPES +}; + /* init source types */ enum init_source_types { INIT_SRC_INLINE /* init value is included in the init command */, @@ -1677,175 +1604,213 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u16 num_pqs); /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) /* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \ - ((port_id) * \ - IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[2].base + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) /* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \ - ((vf_id) * \ - IRO[2].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size) +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + (IRO[3].base + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) /* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size) +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) /* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \ - ((pf_id) * \ - IRO[4].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[4].size) -/* Ustorm Completion ring consumer */ -#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \ - ((global_queue_id) * \ - IRO[5].m1)) -#define USTORM_CQ_CONS_SIZE (IRO[5].size) +#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \ + (IRO[6].base + ((global_queue_id) * IRO[6].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size) /* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size) +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) /* Ystorm Integration Test Data 
*/ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) /* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) /* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) /* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) /* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) /* Tstorm producers */ -#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \ - ((core_rx_queue_id) * \ - IRO[12].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size) -/* Tstorm LiteL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \ - ((core_rx_q_id) * \ - IRO[13].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size) +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ + (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size) +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) /* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \ - ((core_rx_q_id) * \ - IRO[14].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) /* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \ - ((core_txst_id) * \ - IRO[15].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) /* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \ - ((stat_counter_id) * \ - IRO[16].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size) +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[17].base + ((stat_counter_id) * IRO[17].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) /* Mstorm producers */ -#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \ - ((queue_id) * \ - IRO[17].m1)) -#define MSTORM_PRODS_SIZE (IRO[17].size) +#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1)) +#define MSTORM_PRODS_SIZE (IRO[18].size) /* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size) +#define 
MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) /* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \ - ((stat_counter_id) * \ - IRO[19].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[19].size) +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[20].base + ((stat_counter_id) * IRO[20].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) /* Ustorm queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \ - ((queue_id) * \ - IRO[20].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size) +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[21].base + ((queue_id) * IRO[21].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) /* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \ - ((stat_counter_id) * \ - IRO[21].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size) +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) /* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \ - ((pf_id) * \ - IRO[22].m1)) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) /* Ystorm queue zone */ -#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \ - ((queue_id) * \ - IRO[23].m1)) -#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size) +#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[25].base + ((queue_id) * IRO[25].m1)) +#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) /* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \ - ((rss_id) * \ - IRO[24].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size) +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[26].base + ((rss_id) * IRO[26].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) /* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \ - ((rss_id) * \ - IRO[25].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size) +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[27].base + ((rss_id) * IRO[27].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) /* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \ - ((pf_id) * \ - IRO[26].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size) +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[28].base + ((pf_id) * IRO[28].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) /* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \ - ((cmdq_queue_id) * \ - IRO[27].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size) +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ + (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size) /* Mstorm rq-cons of given queue-id */ -#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \ - ((rq_queue_id) * \ - IRO[28].m1)) -#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size) +#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \ + (IRO[30].base + ((rq_queue_id) * IRO[30].m1)) +#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size) +/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */ +#define 
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size) +/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */ +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size) +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size) +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size) +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[35].base + ((pf_id) * IRO[35].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size) +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[36].base + ((pf_id) * IRO[36].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size) +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size) +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size) +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size) +/* Mstorm FCoE RX stats */ +#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size) +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size) /* Pstorm RoCE statistics */ -#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \ - ((stat_counter_id) * \ - IRO[29].m1)) -#define PSTORM_ROCE_STAT_SIZE (IRO[29].size) +#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[42].base + ((stat_counter_id) * IRO[42].m1)) +#define PSTORM_ROCE_STAT_SIZE (IRO[42].size) /* Tstorm RoCE statistics */ -#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \ - ((stat_counter_id) * \ - IRO[30].m1)) -#define TSTORM_ROCE_STAT_SIZE (IRO[30].size) - -static const struct iro iro_arr[31] = { - { 0x10, 0x0, 0x0, 0x0, 0x8 }, - { 0x4448, 0x60, 0x0, 0x0, 0x60 }, - { 0x498, 0x8, 0x0, 0x0, 0x4 }, - { 0x494, 0x0, 0x0, 0x0, 0x4 }, - { 0x10, 0x8, 0x0, 0x0, 0x2 }, - { 0x90, 0x8, 0x0, 0x0, 0x2 }, - { 0x4540, 0x0, 0x0, 0x0, 0xf8 }, - { 0x39e0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x2598, 0x0, 0x0, 0x0, 0xf8 }, - { 0x4350, 0x0, 0x0, 0x0, 0xf8 }, - { 0x52d0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x7a48, 0x0, 0x0, 0x0, 0xf8 }, - { 0x100, 0x8, 0x0, 0x0, 0x8 }, - { 0x5808, 0x10, 0x0, 0x0, 0x10 }, - { 0xb100, 0x30, 0x0, 0x0, 0x30 }, - { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, - { 0x54f8, 0x40, 0x0, 0x0, 0x40 }, - { 0x200, 0x10, 0x0, 0x0, 0x8 }, - { 0x9e70, 0x0, 0x0, 0x0, 0x4 }, - { 0x7ca0, 0x40, 0x0, 0x0, 0x30 }, - { 0xd00, 0x8, 0x0, 0x0, 0x8 }, - { 0x2790, 0x80, 0x0, 0x0, 0x38 }, - { 0xa520, 0xf0, 0x0, 0x0, 0xf0 }, - { 0x80, 0x8, 0x0, 0x0, 0x8 }, - { 0xac0, 0x8, 0x0, 0x0, 0x8 }, - { 0x2580, 0x8, 0x0, 0x0, 0x8 }, - { 0x2500, 0x8, 0x0, 0x0, 0x8 }, - { 0x440, 0x8, 0x0, 0x0, 0x2 }, - { 0x1800, 0x8, 0x0, 0x0, 0x2 }, - { 0x27c8, 0x80, 0x0, 0x0, 0x10 }, - { 0x4710, 
0x10, 0x0, 0x0, 0x10 }, +#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[43].base + ((stat_counter_id) * IRO[43].m1)) +#define TSTORM_ROCE_STAT_SIZE (IRO[43].size) + +static const struct iro iro_arr[44] = { + { 0x10, 0x0, 0x0, 0x0, 0x8 }, + { 0x47c8, 0x60, 0x0, 0x0, 0x60 }, + { 0x5e30, 0x20, 0x0, 0x0, 0x20 }, + { 0x510, 0x8, 0x0, 0x0, 0x4 }, + { 0x490, 0x8, 0x0, 0x0, 0x4 }, + { 0x10, 0x8, 0x0, 0x0, 0x2 }, + { 0x90, 0x8, 0x0, 0x0, 0x2 }, + { 0x4940, 0x0, 0x0, 0x0, 0x78 }, + { 0x3de0, 0x0, 0x0, 0x0, 0x78 }, + { 0x2998, 0x0, 0x0, 0x0, 0x78 }, + { 0x4750, 0x0, 0x0, 0x0, 0x78 }, + { 0x56d0, 0x0, 0x0, 0x0, 0x78 }, + { 0x7e50, 0x0, 0x0, 0x0, 0x78 }, + { 0x100, 0x8, 0x0, 0x0, 0x8 }, + { 0x5c10, 0x10, 0x0, 0x0, 0x10 }, + { 0xb508, 0x30, 0x0, 0x0, 0x30 }, + { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, + { 0x58a0, 0x40, 0x0, 0x0, 0x40 }, + { 0x200, 0x10, 0x0, 0x0, 0x8 }, + { 0xa230, 0x0, 0x0, 0x0, 0x4 }, + { 0x8058, 0x40, 0x0, 0x0, 0x30 }, + { 0xd00, 0x8, 0x0, 0x0, 0x8 }, + { 0x2b30, 0x80, 0x0, 0x0, 0x38 }, + { 0xa808, 0x0, 0x0, 0x0, 0xf0 }, + { 0xa8f8, 0x8, 0x0, 0x0, 0x8 }, + { 0x80, 0x8, 0x0, 0x0, 0x8 }, + { 0xac0, 0x8, 0x0, 0x0, 0x8 }, + { 0x2580, 0x8, 0x0, 0x0, 0x8 }, + { 0x2500, 0x8, 0x0, 0x0, 0x8 }, + { 0x440, 0x8, 0x0, 0x0, 0x2 }, + { 0x1800, 0x8, 0x0, 0x0, 0x2 }, + { 0x1a00, 0x10, 0x8, 0x0, 0x2 }, + { 0x640, 0x10, 0x8, 0x0, 0x2 }, + { 0xd9b8, 0x38, 0x0, 0x0, 0x24 }, + { 0x11048, 0x10, 0x0, 0x0, 0x8 }, + { 0x11678, 0x38, 0x0, 0x0, 0x18 }, + { 0xaec0, 0x30, 0x0, 0x0, 0x10 }, + { 0x8700, 0x28, 0x0, 0x0, 0x18 }, + { 0xec00, 0x10, 0x0, 0x0, 0x10 }, + { 0xde38, 0x40, 0x0, 0x0, 0x30 }, + { 0x121a8, 0x38, 0x0, 0x0, 0x8 }, + { 0xf068, 0x20, 0x0, 0x0, 0x20 }, + { 0x2b68, 0x80, 0x0, 0x0, 0x10 }, + { 0x4ab8, 0x10, 0x0, 0x0, 0x10 }, }; /* Runtime array offsets */ @@ -1866,426 +1831,427 @@ static const struct iro iro_arr[31] = { #define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 #define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 #define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 #define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2232 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 #define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653 -#define 
PRS_REG_SEARCH_FCOE_RT_OFFSET 6654 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6663 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 #define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6665 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 #define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6667 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define 
PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703 #define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129 #define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654 -#define 
QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 
29660 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29834 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29836 #define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856 #define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890 -#define 
QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29901 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29902 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29903 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29904 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29921 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29964 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971 
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29903 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29904 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29923 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29929 +#define 
QM_REG_PQTX2PF_27_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29966 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30505 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30507 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30762 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30764 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30764 +#define 
QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30766 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30796 +#define QM_REG_RLPFCRD_RT_OFFSET 30798 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30812 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814 +#define QM_REG_RLPFENABLE_RT_OFFSET 30814 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30846 +#define QM_REG_WFQPFCRD_RT_OFFSET 30848 #define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31006 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31007 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31008 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31009 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31520 +#define QM_REG_TXPQMAP_RT_OFFSET 31522 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544 -#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 33056 +#define QM_REG_WFQVPCRD_RT_OFFSET 32546 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33568 +#define QM_REG_WFQVPMAP_RT_OFFSET 33058 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741 #define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 
34410 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431 - -#define RUNTIME_ARRAY_SIZE 34432 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887 +#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922 + +#define RUNTIME_ARRAY_SIZE 33923 -/* The eth storm context for the Ystorm */ -struct ystorm_eth_conn_st_ctx { +/* The eth storm context for the Tstorm */ +struct tstorm_eth_conn_st_ctx { __le32 reserved[4]; }; @@ -2562,14 +2528,226 @@ struct xstorm_eth_conn_ag_ctx { __le16 word15 /* word15 */; }; -/* The eth storm context for the Tstorm */ -struct tstorm_eth_conn_st_ctx { - __le32 reserved[4]; +/* The eth storm context for the Ystorm */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[8]; }; -/* The eth storm context for the Mstorm */ -struct mstorm_eth_conn_st_ctx { - __le32 reserved[8]; +struct ystorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define 
YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 terminate_spqe /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 tx_bd_cons_upd /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +struct tstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en 
*/ +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 rx_bd_cons /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 rx_bd_prod /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define 
USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 tx_bd_cons /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 tx_int_coallecing_timeset /* reg3 */; + __le16 tx_drv_bd_cons /* word2 */; + __le16 rx_drv_cqe_cons /* word3 */; }; /* The eth storm context for the Ustorm */ @@ -2577,24 +2755,30 @@ struct ustorm_eth_conn_st_ctx { __le32 reserved[40]; }; +/* The eth storm context for the Mstorm */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + /* eth connection context */ struct eth_conn_context { - struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2] /* padding */; + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2] /* padding */; struct xstorm_eth_conn_st_ctx xstorm_st_context; struct xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct tstorm_eth_conn_st_ctx tstorm_st_context; - struct regpair tstorm_st_padding[2] /* padding */; - struct mstorm_eth_conn_st_ctx mstorm_st_context; + struct ystorm_eth_conn_st_ctx ystorm_st_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; + struct mstorm_eth_conn_st_ctx mstorm_st_context; }; enum eth_filter_action { ETH_FILTER_ACTION_REMOVE, ETH_FILTER_ACTION_ADD, - ETH_FILTER_ACTION_REPLACE, + ETH_FILTER_ACTION_REMOVE_ALL, MAX_ETH_FILTER_ACTION }; @@ -2653,6 +2837,32 @@ enum eth_ramrod_cmd_id { MAX_ETH_RAMROD_CMD_ID }; +enum eth_tx_err { + ETH_TX_ERR_DROP /* Drop erronous packet. 
*/, + ETH_TX_ERR_ASSERT_MALICIOUS, + MAX_ETH_TX_ERR +}; + +struct eth_tx_err_vals { + __le16 values; +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +}; + struct eth_vport_rss_config { __le16 capabilities; #define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 @@ -2669,12 +2879,8 @@ struct eth_vport_rss_config { #define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8 -#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F -#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 u8 rss_id; u8 rss_mode; u8 update_rss_key; @@ -2713,7 +2919,19 @@ struct eth_vport_rx_mode { }; struct eth_vport_tpa_param { - u64 reserved[2]; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + u8 max_buff_num; + u8 reserved; }; struct eth_vport_tx_mode { @@ -2749,10 +2967,14 @@ struct rx_queue_start_ramrod_data { u8 pxp_tph_valid_pkt; u8 pxp_st_hint; __le16 pxp_st_index; - u8 reserved[4]; - struct regpair cqe_pbl_addr; - struct regpair bd_base; - struct regpair sge_base; + u8 pmd_mode; + u8 notify_en; + u8 toggle_val; + u8 reserved[7]; + __le16 reserved1; + struct regpair cqe_pbl_addr; + struct regpair bd_base; + struct regpair reserved2; }; struct rx_queue_stop_ramrod_data { @@ -2764,23 +2986,24 @@ struct rx_queue_stop_ramrod_data { }; struct rx_queue_update_ramrod_data { - __le16 rx_queue_id; - u8 complete_cqe_flg; - u8 complete_event_flg; - u8 init_sge_ring_flg; - u8 vport_id; - u8 pxp_tph_valid_sge; - u8 pxp_st_hint; - __le16 pxp_st_index; - u8 reserved[6]; - struct regpair sge_base; + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 reserved[4]; + u8 reserved1; + u8 reserved2; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + struct regpair reserved6; }; struct tx_queue_start_ramrod_data { __le16 sb_id; u8 sb_index; u8 vport_id; - u8 tc; + u8 reserved0; u8 stats_counter_id; __le16 qm_pq_id; u8 flags; @@ -2790,18 +3013,25 @@ struct tx_queue_start_ramrod_data { #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F -#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3 - u8 pin_context; - u8 pxp_tph_valid_bd; - u8 pxp_tph_valid_pkt; - __le16 pxp_st_index; - u8 pxp_st_hint; - u8 reserved1[3]; - __le16 queue_zone_id; - __le16 test_dup_count; - __le16 pbl_size; - struct regpair pbl_base_addr; +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6 + u8 pxp_st_hint; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + __le16 pxp_st_index; + __le16 comp_agg_size; + __le16 queue_zone_id; + __le16 test_dup_count; + __le16 pbl_size; + __le16 tx_queue_id; + struct regpair pbl_base_addr; + struct regpair bd_cons_address; }; struct tx_queue_stop_ramrod_data { @@ -2822,16 +3052,16 @@ struct vport_start_ramrod_data { struct eth_vport_rx_mode rx_mode; struct eth_vport_tx_mode tx_mode; struct eth_vport_tpa_param tpa_param; - __le16 sge_buff_size; - u8 max_sges_num; - u8 tx_switching_en; - u8 anti_spoofing_en; - u8 default_vlan_en; - u8 handle_ptp_pkts; - u8 silent_vlan_removal_en; - __le16 default_vlan; - u8 untagged; - u8 reserved[7]; + __le16 default_vlan; + u8 tx_switching_en; + u8 anti_spoofing_en; + u8 default_vlan_en; + u8 handle_ptp_pkts; + u8 silent_vlan_removal_en; + u8 untagged; + struct eth_tx_err_vals tx_err_behav; + u8 zero_placement_offset; + u8 reserved[7]; }; struct vport_stop_ramrod_data { @@ -2840,36 +3070,35 @@ struct vport_stop_ramrod_data { }; struct vport_update_ramrod_data_cmn { - u8 vport_id; - u8 update_rx_active_flg; - u8 rx_active_flg; - u8 update_tx_active_flg; - u8 tx_active_flg; - u8 update_rx_mode_flg; - u8 update_tx_mode_flg; - u8 update_approx_mcast_flg; - u8 update_rss_flg; - u8 update_inner_vlan_removal_en_flg; - u8 inner_vlan_removal_en; - u8 update_tpa_param_flg; - u8 update_tpa_en_flg; - u8 update_sge_param_flg; - __le16 sge_buff_size; - u8 max_sges_num; - u8 update_tx_switching_en_flg; - u8 tx_switching_en; - u8 update_anti_spoofing_en_flg; - u8 anti_spoofing_en; - u8 update_handle_ptp_pkts; - u8 handle_ptp_pkts; - u8 update_default_vlan_en_flg; - u8 default_vlan_en; - u8 update_default_vlan_flg; - __le16 default_vlan; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - u8 silent_vlan_removal_en; - u8 reserved; + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_tx_switching_en_flg; + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + u8 default_vlan_en; + u8 update_default_vlan_flg; + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 update_mtu_flg; + __le16 mtu; + u8 reserved[2]; }; struct vport_update_ramrod_mcast { @@ -2885,436 +3114,6 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct mstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* 
state */; - u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */ -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0 /* word0 */; - __le16 word1 /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; -}; - -struct tstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define 
TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 rx_bd_cons /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 rx_bd_prod /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; -}; - -struct ustorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define 
USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 - u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 tx_bd_cons /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 tx_drv_bd_cons /* word2 */; - __le16 rx_drv_cqe_cons /* word3 */; -}; - -struct xstorm_eth_hw_conn_ag_ctx { - u8 reserved0 /* cdu_validation */; - u8 eth_state /* state */; - u8 flags0; -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 - u8 flags1; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 - u8 flags2; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 - u8 flags3; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 - u8 flags4; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 - u8 flags5; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 - u8 flags6; -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 - u8 flags7; -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 - u8 flags8; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define 
XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 - u8 flags9; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 - u8 flags10; -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 - u8 flags11; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 - u8 flags12; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 
0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 - u8 flags13; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 - u8 edpm_event_id /* byte2 */; - __le16 physical_q0 /* physical_q0 */; - __le16 word1 /* physical_q1 */; - __le16 edpm_num_bds /* physical_q2 */; - __le16 tx_bd_cons /* word3 */; - __le16 tx_bd_prod /* word4 */; - __le16 go_to_bd_cons /* word5 */; - __le16 conn_dpi /* conn_dpi */; -}; - #define VF_MAX_STATIC 192 /* In case of K2 */ #define MCP_GLOB_PATH_MAX 2 @@ -3818,6 +3617,13 @@ struct public_port { struct dcbx_local_params local_admin_dcbx_mib; struct dcbx_mib remote_dcbx_mib; struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + u32 transceiver_data; +#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF +#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001 }; /**************************************/ @@ -3830,7 +3636,11 @@ struct public_func { u32 iscsi_boot_signature; u32 iscsi_boot_block_offset; - u32 reserved[8]; + u32 mtu_size; + u32 c2s_pcp_map_lower; + u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + u32 reserved[4]; u32 config; @@ -3894,10 +3704,10 @@ struct public_func { #define DRV_ID_MCP_HSI_VER_SHIFT 16 #define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT) -#define DRV_ID_DRV_TYPE_MASK 0xff000000 +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 #define DRV_ID_DRV_TYPE_SHIFT 24 #define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX 
BIT(DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) @@ -3905,6 +3715,10 @@ struct public_func { #define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) }; /**************************************/ @@ -3964,6 +3778,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_MASK 0xffff0000 #define DRV_MSG_CODE_LOAD_REQ 0x10000000 #define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 #define DRV_MSG_CODE_INIT_PHY 0x22000000 @@ -4100,6 +3915,7 @@ struct public_drv_mb { #define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 #define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 #define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 +#define FW_MSG_CODE_OK 0x00160000 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff @@ -4142,6 +3958,14 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_MAX }; @@ -4212,7 +4036,7 @@ struct nvm_cfg1_glob { #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 @@ -4643,8 +4467,12 @@ struct nvm_cfg1_glob { #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 - - u32 reserved[46]; /* 0x88 */ + u32 device_capabilities; /* 0x88 */ +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 + u32 power_dissipated; /* 0x8C */ + u32 power_consumed; /* 0x90 */ + u32 efi_version; /* 0x94 */ + u32 reserved[42]; /* 0x98 */ }; struct nvm_cfg1_path { @@ -4652,26 +4480,8 @@ struct nvm_cfg1_path { }; struct nvm_cfg1_port { - u32 power_dissipated; /* 0x0 */ -#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF -#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0 -#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00 -#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8 -#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000 -#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16 -#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000 -#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24 - - u32 power_consumed; /* 0x4 */ -#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF -#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0 -#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00 -#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8 -#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000 -#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16 -#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000 -#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24 - + u32 reserved__m_relocated_to_option_123; /* 
0x0 */ + u32 reserved__m_relocated_to_option_124; /* 0x4 */ u32 generic_cont0; /* 0x8 */ #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF #define NVM_CFG1_PORT_LED_MODE_OFFSET 0 @@ -4699,7 +4509,9 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 - +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 u32 pcie_cfg; /* 0xC */ #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 #define NVM_CFG1_PORT_RESERVED15_OFFSET 0 @@ -4784,10 +4596,11 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 #define NVM_CFG1_PORT_AN_MODE_OFFSET 24 #define NVM_CFG1_PORT_AN_MODE_NONE 0x0 @@ -4801,9 +4614,6 @@ struct nvm_cfg1_port { u32 mgmt_traffic; /* 0x20 */ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F #define NVM_CFG1_PORT_RESERVED61_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2 u32 ext_phy; /* 0x24 */ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF @@ -4814,16 +4624,12 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 u32 mba_cfg1; /* 0x28 */ -#define NVM_CFG1_PORT_MBA_MASK 0x00000001 -#define NVM_CFG1_PORT_MBA_OFFSET 0 -#define NVM_CFG1_PORT_MBA_DISABLED 0x0 -#define NVM_CFG1_PORT_MBA_ENABLED 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3 +#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 +#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 @@ -4836,61 +4642,30 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 #define NVM_CFG1_PORT_RESERVED5_OFFSET 9 -#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED5_2K 0x1 -#define NVM_CFG1_PORT_RESERVED5_4K 0x2 -#define NVM_CFG1_PORT_RESERVED5_8K 0x3 -#define NVM_CFG1_PORT_RESERVED5_16K 0x4 -#define NVM_CFG1_PORT_RESERVED5_32K 0x5 -#define NVM_CFG1_PORT_RESERVED5_64K 0x6 -#define NVM_CFG1_PORT_RESERVED5_128K 0x7 -#define NVM_CFG1_PORT_RESERVED5_256K 0x8 -#define NVM_CFG1_PORT_RESERVED5_512K 0x9 -#define NVM_CFG1_PORT_RESERVED5_1M 
0xA -#define NVM_CFG1_PORT_RESERVED5_2M 0xB -#define NVM_CFG1_PORT_RESERVED5_4M 0xC -#define NVM_CFG1_PORT_RESERVED5_8M 0xD -#define NVM_CFG1_PORT_RESERVED5_16M 0xE -#define NVM_CFG1_PORT_RESERVED5_32M 0xF -#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 u32 mba_cfg2; /* 0x2C */ -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0 -#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000 -#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16 +#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 +#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 u32 vf_cfg; /* 0x30 */ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF #define NVM_CFG1_PORT_RESERVED8_OFFSET 0 #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 #define NVM_CFG1_PORT_RESERVED6_OFFSET 16 -#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED6_4K 0x1 -#define NVM_CFG1_PORT_RESERVED6_8K 0x2 -#define NVM_CFG1_PORT_RESERVED6_16K 0x3 -#define NVM_CFG1_PORT_RESERVED6_32K 0x4 -#define NVM_CFG1_PORT_RESERVED6_64K 0x5 -#define NVM_CFG1_PORT_RESERVED6_128K 0x6 -#define NVM_CFG1_PORT_RESERVED6_256K 0x7 -#define NVM_CFG1_PORT_RESERVED6_512K 0x8 -#define NVM_CFG1_PORT_RESERVED6_1M 0x9 -#define NVM_CFG1_PORT_RESERVED6_2M 0xA -#define NVM_CFG1_PORT_RESERVED6_4M 0xB -#define NVM_CFG1_PORT_RESERVED6_8M 0xC -#define NVM_CFG1_PORT_RESERVED6_16M 0xD -#define NVM_CFG1_PORT_RESERVED6_32M 0xE -#define NVM_CFG1_PORT_RESERVED6_64M 0xF struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ @@ -4973,18 +4748,16 @@ struct nvm_cfg1_func { u32 device_id; /* 0x10 */ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16 +#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 u32 cmn_cfg; /* 0x14 */ -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7 
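The NVM_CFG1 option words in this header pack several small settings into single 32-bit registers through paired _MASK/_OFFSET defines, with the masks given in place (for example NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK is 0x001E0000 with offset 17), so a consumer ANDs the word with the mask and shifts the result down by the offset. A minimal sketch of that extraction follows; the helper name nvm_cfg1_get_field and the example caller are illustrative only and are not part of this patch.

/* Sketch only: extract one in-place MASK/OFFSET field from an
 * NVM_CFG1-style option word (u32 as in <linux/types.h>).
 */
static inline u32 nvm_cfg1_get_field(u32 word, u32 mask, u32 offset)
{
	return (word & mask) >> offset;
}

/* Hypothetical use against the defines above:
 *
 *   speed = nvm_cfg1_get_field(mba_cfg1,
 *			       NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK,
 *			       NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET);
 *   if (speed == NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG)
 *		...
 */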
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 @@ -5029,8 +4802,8 @@ struct nvm_cfg1_func { struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ - - u32 reserved[9]; /* 0x2C */ + u32 preboot_generic_cfg; /* 0x2C */ + u32 reserved[8]; /* 0x30 */ }; struct nvm_cfg1 { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index ffa99273b353..a95a3e4b3101 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -44,7 +44,7 @@ struct qed_ptt_pool { int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) { struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), - GFP_ATOMIC); + GFP_KERNEL); int i; if (!p_pool) diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 0b21a553cc7d..f55ebdc3c832 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. */ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, - u8 start_vport, u8 num_vports, struct init_qm_vport_params *vport_params) { - u8 tc, i, vport_id; u32 inc_val; + u8 tc, i; /* go over all PF VPORTs */ - for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET; - u16 *pq_ids = &vport_params[i].first_tx_pq_id[0]; + for (i = 0; i < num_vports; i++) { if (!vport_params[i].vport_wfq) continue; @@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, * different TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = pq_ids[tc]; + u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) { STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); - STORE_RT_REG(p_hwfn, temp + vport_pq_id, - QM_WFQ_UPPER_BOUND | - QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, - QM_WFQ_INIT_CRD(inc_val) | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } } @@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; - if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport, - p_params->num_vports, vport_params)) + if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 796f1390e598..3269b3610e03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) int i; for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) - p_hwfn->rt_data[i].b_valid = false; + p_hwfn->rt_data.b_valid[i] = false; } void qed_init_store_rt_reg(struct 
qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { - p_hwfn->rt_data[rt_offset].init_val = val; - p_hwfn->rt_data[rt_offset].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, - u32 rt_offset, - u32 *val, + u32 rt_offset, u32 *p_val, size_t size) { size_t i; for (i = 0; i < size / sizeof(u32); i++) { - p_hwfn->rt_data[rt_offset + i].init_val = val[i]; - p_hwfn->rt_data[rt_offset + i].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; } } -static void qed_init_rt(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 addr, - u32 rt_offset, - u32 size) +static int qed_init_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u16 rt_offset, + u16 size, + bool b_must_dmae) { - struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset; - u32 i; + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + u16 i, segment; + int rc = 0; + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. + */ for (i = 0; i < size; i++) { - if (!rt_data[i].b_valid) + if (!p_valid[i]) continue; - qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val); + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. + */ + if (!b_must_dmae) { + qed_wr(p_hwfn, p_ptt, addr + (i << 2), + p_init_val[i]); + continue; + } + + /* Start of a new segment */ + for (segment = 1; i + segment < size; segment++) + if (!p_valid[i + segment]) + break; + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(p_init_val + i), + addr + (i << 2), segment, 0); + if (rc != 0) + return rc; + + /* Jump over the entire segment, including invalid entry */ + i += segment; } + + return rc; } int qed_init_alloc(struct qed_hwfn *p_hwfn) { - struct qed_rt_data *rt_data; + struct qed_rt_data *rt_data = &p_hwfn->rt_data; - rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC); - if (!rt_data) + rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->b_valid) return -ENOMEM; - p_hwfn->rt_data = rt_data; + rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->init_val) { + kfree(rt_data->b_valid); + return -ENOMEM; + } return 0; } void qed_init_free(struct qed_hwfn *p_hwfn) { - kfree(p_hwfn->rt_data); - p_hwfn->rt_data = NULL; + kfree(p_hwfn->rt_data.init_val); + kfree(p_hwfn->rt_data.b_valid); } static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, @@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_RUNTIME: qed_init_rt(p_hwfn, p_ptt, addr, le16_to_cpu(arg->runtime.offset), - le16_to_cpu(arg->runtime.size)); + le16_to_cpu(arg->runtime.size), + b_must_dmae); break; } @@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_read_op *cmd) { - u32 data = le32_to_cpu(cmd->op_data); - u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = QED_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = le32_to_cpu(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - bool (*comp_check)(u32 val, - u32 expected_val); - u32 delay = QED_INIT_POLL_PERIOD_US, val; val = qed_rd(p_hwfn, p_ptt, addr); - 
data = le32_to_cpu(cmd->op_data); - if (GET_FIELD(data, INIT_READ_OP_POLL)) { - int i; + if (poll == INIT_POLL_NONE) + return; - switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) { - case INIT_COMPARISON_EQ: - comp_check = comp_eq; - break; - case INIT_COMPARISON_OR: - comp_check = comp_or; - break; - case INIT_COMPARISON_AND: - comp_check = comp_and; - break; - default: - comp_check = NULL; - DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", - data); - return; - } + switch (poll) { + case INIT_POLL_EQ: + comp_check = comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } - for (i = 0; - i < QED_INIT_MAX_POLL_COUNT && - !comp_check(val, le32_to_cpu(cmd->expected_val)); - i++) { - udelay(delay); - val = qed_rd(p_hwfn, p_ptt, addr); - } + data = le32_to_cpu(cmd->expected_val); + for (i = 0; + i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data); + i++) { + udelay(delay); + val = qed_rd(p_hwfn, p_ptt, addr); + } - if (i == QED_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", - addr, le32_to_cpu(cmd->expected_val), - val, data); + if (i == QED_INIT_MAX_POLL_COUNT) { + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", + addr, le32_to_cpu(cmd->expected_val), + val, le32_to_cpu(cmd->op_data)); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 9cc9d62c1fec..ffd0accc2ec9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -39,24 +39,1737 @@ struct qed_sb_sp_info { struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; +enum qed_attention_type { + QED_ATTN_TYPE_ATTN, + QED_ATTN_TYPE_PARITY, +}; + #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) -#define ATTN_STATE_BITS (0xfff) +struct aeu_invert_reg_bit { + char bit_name[30]; + +#define ATTENTION_PARITY (1 << 0) + +#define ATTENTION_LENGTH_MASK (0x00000ff0) +#define ATTENTION_LENGTH_SHIFT (4) +#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ + ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) +#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) +#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ + ATTENTION_PARITY) + +/* Multiple bits start with this offset */ +#define ATTENTION_OFFSET_MASK (0x000ff000) +#define ATTENTION_OFFSET_SHIFT (12) + unsigned int flags; + + /* Callback to call if attention will be triggered */ + int (*cb)(struct qed_hwfn *p_hwfn); + + enum block_id block_index; +}; + +struct aeu_invert_reg { + struct aeu_invert_reg_bit bits[32]; +}; + +#define MAX_ATTN_GRPS (8) +#define NUM_ATTN_REGS (9) + +/* HW Attention register */ +struct attn_hw_reg { + u16 reg_idx; /* Index of this register in its block */ + u16 num_of_bits; /* number of valid attention bits */ + u32 sts_addr; /* Address of the STS register */ + u32 sts_clr_addr; /* Address of the STS_CLR register */ + u32 sts_wr_addr; /* Address of the STS_WR register */ + u32 mask_addr; /* Address of the MASK register */ +}; + +/* HW block attention registers */ +struct attn_hw_regs { + u16 num_of_int_regs; /* Number of interrupt regs */ + u16 num_of_prty_regs; /* Number of parity regs */ + struct attn_hw_reg **int_regs; /* interrupt regs */ + struct attn_hw_reg 
**prty_regs; /* parity regs */ +}; + +/* HW block attention registers */ +struct attn_hw_block { + const char *name; /* Block name */ + struct attn_hw_regs chip_regs[1]; +}; + +static struct attn_hw_reg grc_int0_bb_b0 = { + 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184}; + +static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { + &grc_int0_bb_b0}; + +static struct attn_hw_reg grc_prty1_bb_b0 = { + 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204}; + +static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { + &grc_prty1_bb_b0}; + +static struct attn_hw_reg miscs_int0_bb_b0 = { + 0, 3, 0x9180, 0x918c, 0x9188, 0x9184}; + +static struct attn_hw_reg miscs_int1_bb_b0 = { + 1, 11, 0x9190, 0x919c, 0x9198, 0x9194}; + +static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { + &miscs_int0_bb_b0, &miscs_int1_bb_b0}; + +static struct attn_hw_reg miscs_prty0_bb_b0 = { + 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4}; + +static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { + &miscs_prty0_bb_b0}; + +static struct attn_hw_reg misc_int0_bb_b0 = { + 0, 1, 0x8180, 0x818c, 0x8188, 0x8184}; + +static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { + &misc_int0_bb_b0}; + +static struct attn_hw_reg pglue_b_int0_bb_b0 = { + 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184}; + +static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { + &pglue_b_int0_bb_b0}; + +static struct attn_hw_reg pglue_b_prty0_bb_b0 = { + 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194}; + +static struct attn_hw_reg pglue_b_prty1_bb_b0 = { + 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204}; + +static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { + &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0}; + +static struct attn_hw_reg cnig_int0_bb_b0 = { + 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec}; + +static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { + &cnig_int0_bb_b0}; + +static struct attn_hw_reg cnig_prty0_bb_b0 = { + 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c}; + +static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { + &cnig_prty0_bb_b0}; + +static struct attn_hw_reg cpmu_int0_bb_b0 = { + 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4}; + +static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = { + &cpmu_int0_bb_b0}; + +static struct attn_hw_reg ncsi_int0_bb_b0 = { + 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0}; + +static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { + &ncsi_int0_bb_b0}; + +static struct attn_hw_reg ncsi_prty1_bb_b0 = { + 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004}; + +static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { + &ncsi_prty1_bb_b0}; + +static struct attn_hw_reg opte_prty1_bb_b0 = { + 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004}; + +static struct attn_hw_reg opte_prty0_bb_b0 = { + 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c}; + +static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { + &opte_prty1_bb_b0, &opte_prty0_bb_b0}; + +static struct attn_hw_reg bmb_int0_bb_b0 = { + 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4}; + +static struct attn_hw_reg bmb_int1_bb_b0 = { + 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc}; + +static struct attn_hw_reg bmb_int2_bb_b0 = { + 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4}; + +static struct attn_hw_reg bmb_int3_bb_b0 = { + 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c}; + +static struct attn_hw_reg bmb_int4_bb_b0 = { + 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124}; + +static struct attn_hw_reg bmb_int5_bb_b0 = { + 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c}; + +static struct attn_hw_reg bmb_int6_bb_b0 = { + 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154}; + +static struct attn_hw_reg bmb_int7_bb_b0 = { + 7, 32, 0x540168, 
0x540174, 0x540170, 0x54016c}; + +static struct attn_hw_reg bmb_int8_bb_b0 = { + 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188}; + +static struct attn_hw_reg bmb_int9_bb_b0 = { + 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0}; + +static struct attn_hw_reg bmb_int10_bb_b0 = { + 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8}; + +static struct attn_hw_reg bmb_int11_bb_b0 = { + 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0}; + +static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { + &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, + &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0, + &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0}; + +static struct attn_hw_reg bmb_prty0_bb_b0 = { + 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0}; + +static struct attn_hw_reg bmb_prty1_bb_b0 = { + 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404}; + +static struct attn_hw_reg bmb_prty2_bb_b0 = { + 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414}; + +static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { + &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0}; + +static struct attn_hw_reg pcie_prty1_bb_b0 = { + 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004}; + +static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { + &pcie_prty1_bb_b0}; + +static struct attn_hw_reg mcp2_prty0_bb_b0 = { + 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044}; + +static struct attn_hw_reg mcp2_prty1_bb_b0 = { + 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208}; + +static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { + &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0}; + +static struct attn_hw_reg pswhst_int0_bb_b0 = { + 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184}; + +static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = { + &pswhst_int0_bb_b0}; + +static struct attn_hw_reg pswhst_prty0_bb_b0 = { + 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194}; + +static struct attn_hw_reg pswhst_prty1_bb_b0 = { + 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204}; + +static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { + &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0}; + +static struct attn_hw_reg pswhst2_int0_bb_b0 = { + 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184}; + +static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { + &pswhst2_int0_bb_b0}; + +static struct attn_hw_reg pswhst2_prty0_bb_b0 = { + 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194}; + +static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { + &pswhst2_prty0_bb_b0}; + +static struct attn_hw_reg pswrd_int0_bb_b0 = { + 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184}; + +static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { + &pswrd_int0_bb_b0}; + +static struct attn_hw_reg pswrd_prty0_bb_b0 = { + 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194}; + +static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { + &pswrd_prty0_bb_b0}; + +static struct attn_hw_reg pswrd2_int0_bb_b0 = { + 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184}; + +static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { + &pswrd2_int0_bb_b0}; + +static struct attn_hw_reg pswrd2_prty0_bb_b0 = { + 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194}; + +static struct attn_hw_reg pswrd2_prty1_bb_b0 = { + 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204}; + +static struct attn_hw_reg pswrd2_prty2_bb_b0 = { + 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214}; + +static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { + &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0}; + +static struct attn_hw_reg pswwr_int0_bb_b0 = { + 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184}; + +static struct attn_hw_reg 
*pswwr_int_bb_b0_regs[1] = { + &pswwr_int0_bb_b0}; + +static struct attn_hw_reg pswwr_prty0_bb_b0 = { + 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194}; + +static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = { + &pswwr_prty0_bb_b0}; + +static struct attn_hw_reg pswwr2_int0_bb_b0 = { + 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184}; + +static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { + &pswwr2_int0_bb_b0}; + +static struct attn_hw_reg pswwr2_prty0_bb_b0 = { + 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194}; + +static struct attn_hw_reg pswwr2_prty1_bb_b0 = { + 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204}; + +static struct attn_hw_reg pswwr2_prty2_bb_b0 = { + 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214}; + +static struct attn_hw_reg pswwr2_prty3_bb_b0 = { + 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224}; + +static struct attn_hw_reg pswwr2_prty4_bb_b0 = { + 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234}; + +static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = { + &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, + &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0}; + +static struct attn_hw_reg pswrq_int0_bb_b0 = { + 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184}; + +static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { + &pswrq_int0_bb_b0}; + +static struct attn_hw_reg pswrq_prty0_bb_b0 = { + 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194}; + +static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { + &pswrq_prty0_bb_b0}; + +static struct attn_hw_reg pswrq2_int0_bb_b0 = { + 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184}; + +static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { + &pswrq2_int0_bb_b0}; + +static struct attn_hw_reg pswrq2_prty1_bb_b0 = { + 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204}; + +static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { + &pswrq2_prty1_bb_b0}; + +static struct attn_hw_reg pglcs_int0_bb_b0 = { + 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04}; + +static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { + &pglcs_int0_bb_b0}; + +static struct attn_hw_reg dmae_int0_bb_b0 = { + 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184}; + +static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = { + &dmae_int0_bb_b0}; + +static struct attn_hw_reg dmae_prty1_bb_b0 = { + 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204}; + +static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { + &dmae_prty1_bb_b0}; + +static struct attn_hw_reg ptu_int0_bb_b0 = { + 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184}; + +static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { + &ptu_int0_bb_b0}; + +static struct attn_hw_reg ptu_prty1_bb_b0 = { + 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204}; + +static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { + &ptu_prty1_bb_b0}; + +static struct attn_hw_reg tcm_int0_bb_b0 = { + 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184}; + +static struct attn_hw_reg tcm_int1_bb_b0 = { + 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194}; + +static struct attn_hw_reg tcm_int2_bb_b0 = { + 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4}; + +static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { + &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0}; + +static struct attn_hw_reg tcm_prty1_bb_b0 = { + 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204}; + +static struct attn_hw_reg tcm_prty2_bb_b0 = { + 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214}; + +static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { + &tcm_prty1_bb_b0, &tcm_prty2_bb_b0}; + +static struct attn_hw_reg mcm_int0_bb_b0 = { + 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184}; + +static struct attn_hw_reg mcm_int1_bb_b0 = { + 1, 
26, 0x1200190, 0x120019c, 0x1200198, 0x1200194}; + +static struct attn_hw_reg mcm_int2_bb_b0 = { + 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4}; + +static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { + &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0}; + +static struct attn_hw_reg mcm_prty1_bb_b0 = { + 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204}; + +static struct attn_hw_reg mcm_prty2_bb_b0 = { + 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214}; + +static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { + &mcm_prty1_bb_b0, &mcm_prty2_bb_b0}; + +static struct attn_hw_reg ucm_int0_bb_b0 = { + 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184}; + +static struct attn_hw_reg ucm_int1_bb_b0 = { + 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194}; + +static struct attn_hw_reg ucm_int2_bb_b0 = { + 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4}; + +static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { + &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0}; + +static struct attn_hw_reg ucm_prty1_bb_b0 = { + 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204}; + +static struct attn_hw_reg ucm_prty2_bb_b0 = { + 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214}; + +static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { + &ucm_prty1_bb_b0, &ucm_prty2_bb_b0}; + +static struct attn_hw_reg xcm_int0_bb_b0 = { + 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184}; + +static struct attn_hw_reg xcm_int1_bb_b0 = { + 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194}; + +static struct attn_hw_reg xcm_int2_bb_b0 = { + 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4}; + +static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { + &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0}; + +static struct attn_hw_reg xcm_prty1_bb_b0 = { + 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204}; + +static struct attn_hw_reg xcm_prty2_bb_b0 = { + 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214}; + +static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { + &xcm_prty1_bb_b0, &xcm_prty2_bb_b0}; + +static struct attn_hw_reg ycm_int0_bb_b0 = { + 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184}; + +static struct attn_hw_reg ycm_int1_bb_b0 = { + 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194}; + +static struct attn_hw_reg ycm_int2_bb_b0 = { + 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4}; + +static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { + &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0}; + +static struct attn_hw_reg ycm_prty1_bb_b0 = { + 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204}; + +static struct attn_hw_reg ycm_prty2_bb_b0 = { + 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214}; + +static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { + &ycm_prty1_bb_b0, &ycm_prty2_bb_b0}; + +static struct attn_hw_reg pcm_int0_bb_b0 = { + 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184}; + +static struct attn_hw_reg pcm_int1_bb_b0 = { + 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194}; + +static struct attn_hw_reg pcm_int2_bb_b0 = { + 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4}; + +static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { + &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0}; + +static struct attn_hw_reg pcm_prty1_bb_b0 = { + 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204}; + +static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = { + &pcm_prty1_bb_b0}; + +static struct attn_hw_reg qm_int0_bb_b0 = { + 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184}; + +static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { + &qm_int0_bb_b0}; + +static struct attn_hw_reg qm_prty0_bb_b0 = { + 0, 11, 0x2f0190, 0x2f019c, 
0x2f0198, 0x2f0194}; + +static struct attn_hw_reg qm_prty1_bb_b0 = { + 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204}; + +static struct attn_hw_reg qm_prty2_bb_b0 = { + 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214}; + +static struct attn_hw_reg qm_prty3_bb_b0 = { + 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224}; + +static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { + &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0}; + +static struct attn_hw_reg tm_int0_bb_b0 = { + 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184}; + +static struct attn_hw_reg tm_int1_bb_b0 = { + 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194}; + +static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { + &tm_int0_bb_b0, &tm_int1_bb_b0}; + +static struct attn_hw_reg tm_prty1_bb_b0 = { + 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204}; + +static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { + &tm_prty1_bb_b0}; + +static struct attn_hw_reg dorq_int0_bb_b0 = { + 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184}; + +static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { + &dorq_int0_bb_b0}; + +static struct attn_hw_reg dorq_prty0_bb_b0 = { + 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194}; + +static struct attn_hw_reg dorq_prty1_bb_b0 = { + 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204}; + +static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { + &dorq_prty0_bb_b0, &dorq_prty1_bb_b0}; + +static struct attn_hw_reg brb_int0_bb_b0 = { + 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4}; + +static struct attn_hw_reg brb_int1_bb_b0 = { + 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc}; + +static struct attn_hw_reg brb_int2_bb_b0 = { + 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4}; + +static struct attn_hw_reg brb_int3_bb_b0 = { + 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c}; + +static struct attn_hw_reg brb_int4_bb_b0 = { + 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124}; + +static struct attn_hw_reg brb_int5_bb_b0 = { + 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c}; + +static struct attn_hw_reg brb_int6_bb_b0 = { + 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154}; + +static struct attn_hw_reg brb_int7_bb_b0 = { + 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c}; + +static struct attn_hw_reg brb_int8_bb_b0 = { + 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188}; + +static struct attn_hw_reg brb_int9_bb_b0 = { + 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0}; + +static struct attn_hw_reg brb_int10_bb_b0 = { + 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8}; + +static struct attn_hw_reg brb_int11_bb_b0 = { + 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0}; + +static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { + &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, + &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, + &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0}; + +static struct attn_hw_reg brb_prty0_bb_b0 = { + 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0}; + +static struct attn_hw_reg brb_prty1_bb_b0 = { + 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404}; + +static struct attn_hw_reg brb_prty2_bb_b0 = { + 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414}; + +static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { + &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0}; + +static struct attn_hw_reg src_int0_bb_b0 = { + 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4}; + +static struct attn_hw_reg *src_int_bb_b0_regs[1] = { + &src_int0_bb_b0}; + +static struct attn_hw_reg prs_int0_bb_b0 = { + 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044}; + +static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { 
+ &prs_int0_bb_b0}; + +static struct attn_hw_reg prs_prty0_bb_b0 = { + 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054}; + +static struct attn_hw_reg prs_prty1_bb_b0 = { + 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208}; + +static struct attn_hw_reg prs_prty2_bb_b0 = { + 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218}; + +static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { + &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0}; + +static struct attn_hw_reg tsdm_int0_bb_b0 = { + 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044}; + +static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { + &tsdm_int0_bb_b0}; + +static struct attn_hw_reg tsdm_prty1_bb_b0 = { + 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204}; + +static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { + &tsdm_prty1_bb_b0}; + +static struct attn_hw_reg msdm_int0_bb_b0 = { + 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044}; + +static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { + &msdm_int0_bb_b0}; + +static struct attn_hw_reg msdm_prty1_bb_b0 = { + 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204}; + +static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { + &msdm_prty1_bb_b0}; + +static struct attn_hw_reg usdm_int0_bb_b0 = { + 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044}; + +static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { + &usdm_int0_bb_b0}; + +static struct attn_hw_reg usdm_prty1_bb_b0 = { + 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204}; + +static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { + &usdm_prty1_bb_b0}; + +static struct attn_hw_reg xsdm_int0_bb_b0 = { + 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044}; + +static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { + &xsdm_int0_bb_b0}; + +static struct attn_hw_reg xsdm_prty1_bb_b0 = { + 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204}; + +static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { + &xsdm_prty1_bb_b0}; + +static struct attn_hw_reg ysdm_int0_bb_b0 = { + 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044}; + +static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { + &ysdm_int0_bb_b0}; + +static struct attn_hw_reg ysdm_prty1_bb_b0 = { + 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204}; + +static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { + &ysdm_prty1_bb_b0}; + +static struct attn_hw_reg psdm_int0_bb_b0 = { + 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044}; + +static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { + &psdm_int0_bb_b0}; + +static struct attn_hw_reg psdm_prty1_bb_b0 = { + 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204}; + +static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { + &psdm_prty1_bb_b0}; + +static struct attn_hw_reg tsem_int0_bb_b0 = { + 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044}; + +static struct attn_hw_reg tsem_int1_bb_b0 = { + 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044}; + +static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { + &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg tsem_prty0_bb_b0 = { + 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc}; + +static struct attn_hw_reg tsem_prty1_bb_b0 = { + 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204}; + +static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { + &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, + &tsem_fast_memory_vfc_config_prty1_bb_b0}; + +static struct attn_hw_reg msem_int0_bb_b0 = { + 0, 32, 
0x1800040, 0x180004c, 0x1800048, 0x1800044}; + +static struct attn_hw_reg msem_int1_bb_b0 = { + 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044}; + +static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { + &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg msem_prty0_bb_b0 = { + 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc}; + +static struct attn_hw_reg msem_prty1_bb_b0 = { + 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204}; + +static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = { + &msem_prty0_bb_b0, &msem_prty1_bb_b0}; + +static struct attn_hw_reg usem_int0_bb_b0 = { + 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044}; + +static struct attn_hw_reg usem_int1_bb_b0 = { + 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044}; + +static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { + &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg usem_prty0_bb_b0 = { + 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc}; + +static struct attn_hw_reg usem_prty1_bb_b0 = { + 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204}; + +static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { + &usem_prty0_bb_b0, &usem_prty1_bb_b0}; + +static struct attn_hw_reg xsem_int0_bb_b0 = { + 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044}; + +static struct attn_hw_reg xsem_int1_bb_b0 = { + 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044}; + +static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { + &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg xsem_prty0_bb_b0 = { + 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc}; + +static struct attn_hw_reg xsem_prty1_bb_b0 = { + 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204}; + +static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = { + &xsem_prty0_bb_b0, &xsem_prty1_bb_b0}; + +static struct attn_hw_reg ysem_int0_bb_b0 = { + 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044}; + +static struct attn_hw_reg ysem_int1_bb_b0 = { + 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044}; + +static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = { + &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg ysem_prty0_bb_b0 = { + 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc}; + +static struct attn_hw_reg ysem_prty1_bb_b0 = { + 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204}; + +static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { + &ysem_prty0_bb_b0, &ysem_prty1_bb_b0}; + +static struct attn_hw_reg psem_int0_bb_b0 = { + 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044}; + +static struct attn_hw_reg psem_int1_bb_b0 = { + 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044}; + +static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { + &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg psem_prty0_bb_b0 = { + 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc}; + +static struct attn_hw_reg psem_prty1_bb_b0 = { + 1, 6, 
0x1600200, 0x160020c, 0x1600208, 0x1600204}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204}; + +static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { + &psem_prty0_bb_b0, &psem_prty1_bb_b0, + &psem_fast_memory_vfc_config_prty1_bb_b0}; + +static struct attn_hw_reg rss_int0_bb_b0 = { + 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984}; + +static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { + &rss_int0_bb_b0}; + +static struct attn_hw_reg rss_prty1_bb_b0 = { + 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04}; + +static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = { + &rss_prty1_bb_b0}; + +static struct attn_hw_reg tmld_int0_bb_b0 = { + 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184}; + +static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { + &tmld_int0_bb_b0}; + +static struct attn_hw_reg tmld_prty1_bb_b0 = { + 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204}; + +static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { + &tmld_prty1_bb_b0}; + +static struct attn_hw_reg muld_int0_bb_b0 = { + 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184}; + +static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { + &muld_int0_bb_b0}; + +static struct attn_hw_reg muld_prty1_bb_b0 = { + 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204}; + +static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { + &muld_prty1_bb_b0}; + +static struct attn_hw_reg yuld_int0_bb_b0 = { + 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184}; + +static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { + &yuld_int0_bb_b0}; + +static struct attn_hw_reg yuld_prty1_bb_b0 = { + 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204}; + +static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { + &yuld_prty1_bb_b0}; + +static struct attn_hw_reg xyld_int0_bb_b0 = { + 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184}; + +static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { + &xyld_int0_bb_b0}; + +static struct attn_hw_reg xyld_prty1_bb_b0 = { + 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204}; + +static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { + &xyld_prty1_bb_b0}; + +static struct attn_hw_reg prm_int0_bb_b0 = { + 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044}; + +static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { + &prm_int0_bb_b0}; + +static struct attn_hw_reg prm_prty0_bb_b0 = { + 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054}; + +static struct attn_hw_reg prm_prty1_bb_b0 = { + 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204}; + +static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { + &prm_prty0_bb_b0, &prm_prty1_bb_b0}; + +static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { + 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044}; + +static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { + &pbf_pb1_int0_bb_b0}; + +static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { + 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054}; + +static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { + &pbf_pb1_prty0_bb_b0}; + +static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { + 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044}; + +static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { + &pbf_pb2_int0_bb_b0}; + +static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { + 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054}; + +static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { + &pbf_pb2_prty0_bb_b0}; + +static struct attn_hw_reg rpb_int0_bb_b0 = { + 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044}; + +static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { + &rpb_int0_bb_b0}; + +static struct attn_hw_reg rpb_prty0_bb_b0 = { + 0, 1, 0x23c050, 0x23c05c, 
0x23c058, 0x23c054}; + +static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { + &rpb_prty0_bb_b0}; + +static struct attn_hw_reg btb_int0_bb_b0 = { + 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4}; + +static struct attn_hw_reg btb_int1_bb_b0 = { + 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc}; + +static struct attn_hw_reg btb_int2_bb_b0 = { + 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4}; + +static struct attn_hw_reg btb_int3_bb_b0 = { + 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c}; + +static struct attn_hw_reg btb_int4_bb_b0 = { + 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124}; + +static struct attn_hw_reg btb_int5_bb_b0 = { + 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c}; + +static struct attn_hw_reg btb_int6_bb_b0 = { + 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154}; + +static struct attn_hw_reg btb_int8_bb_b0 = { + 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188}; + +static struct attn_hw_reg btb_int9_bb_b0 = { + 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0}; + +static struct attn_hw_reg btb_int10_bb_b0 = { + 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8}; + +static struct attn_hw_reg btb_int11_bb_b0 = { + 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0}; + +static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { + &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, + &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, + &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0}; + +static struct attn_hw_reg btb_prty0_bb_b0 = { + 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0}; + +static struct attn_hw_reg btb_prty1_bb_b0 = { + 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404}; + +static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { + &btb_prty0_bb_b0, &btb_prty1_bb_b0}; + +static struct attn_hw_reg pbf_int0_bb_b0 = { + 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184}; + +static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { + &pbf_int0_bb_b0}; + +static struct attn_hw_reg pbf_prty0_bb_b0 = { + 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194}; + +static struct attn_hw_reg pbf_prty1_bb_b0 = { + 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204}; + +static struct attn_hw_reg pbf_prty2_bb_b0 = { + 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214}; + +static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { + &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0}; + +static struct attn_hw_reg rdif_int0_bb_b0 = { + 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184}; + +static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { + &rdif_int0_bb_b0}; + +static struct attn_hw_reg rdif_prty0_bb_b0 = { + 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194}; + +static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { + &rdif_prty0_bb_b0}; + +static struct attn_hw_reg tdif_int0_bb_b0 = { + 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184}; + +static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { + &tdif_int0_bb_b0}; + +static struct attn_hw_reg tdif_prty0_bb_b0 = { + 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194}; + +static struct attn_hw_reg tdif_prty1_bb_b0 = { + 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204}; + +static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { + &tdif_prty0_bb_b0, &tdif_prty1_bb_b0}; + +static struct attn_hw_reg cdu_int0_bb_b0 = { + 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc}; + +static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { + &cdu_int0_bb_b0}; + +static struct attn_hw_reg cdu_prty1_bb_b0 = { + 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204}; + +static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { + &cdu_prty1_bb_b0}; + +static struct attn_hw_reg ccfc_int0_bb_b0 = { + 0, 2, 0x2e0180, 
0x2e018c, 0x2e0188, 0x2e0184}; + +static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { + &ccfc_int0_bb_b0}; + +static struct attn_hw_reg ccfc_prty1_bb_b0 = { + 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204}; + +static struct attn_hw_reg ccfc_prty0_bb_b0 = { + 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8}; + +static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { + &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0}; + +static struct attn_hw_reg tcfc_int0_bb_b0 = { + 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184}; + +static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = { + &tcfc_int0_bb_b0}; + +static struct attn_hw_reg tcfc_prty1_bb_b0 = { + 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204}; + +static struct attn_hw_reg tcfc_prty0_bb_b0 = { + 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8}; + +static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { + &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0}; + +static struct attn_hw_reg igu_int0_bb_b0 = { + 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184}; + +static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { + &igu_int0_bb_b0}; + +static struct attn_hw_reg igu_prty0_bb_b0 = { + 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194}; + +static struct attn_hw_reg igu_prty1_bb_b0 = { + 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204}; + +static struct attn_hw_reg igu_prty2_bb_b0 = { + 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214}; + +static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { + &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0}; + +static struct attn_hw_reg cau_int0_bb_b0 = { + 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0}; + +static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { + &cau_int0_bb_b0}; + +static struct attn_hw_reg cau_prty1_bb_b0 = { + 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204}; + +static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { + &cau_prty1_bb_b0}; + +static struct attn_hw_reg dbg_int0_bb_b0 = { + 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184}; + +static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { + &dbg_int0_bb_b0}; + +static struct attn_hw_reg dbg_prty1_bb_b0 = { + 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204}; + +static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = { + &dbg_prty1_bb_b0}; + +static struct attn_hw_reg nig_int0_bb_b0 = { + 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044}; + +static struct attn_hw_reg nig_int1_bb_b0 = { + 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054}; + +static struct attn_hw_reg nig_int2_bb_b0 = { + 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064}; + +static struct attn_hw_reg nig_int3_bb_b0 = { + 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074}; + +static struct attn_hw_reg nig_int4_bb_b0 = { + 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084}; + +static struct attn_hw_reg nig_int5_bb_b0 = { + 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094}; + +static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { + &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, + &nig_int4_bb_b0, &nig_int5_bb_b0}; + +static struct attn_hw_reg nig_prty0_bb_b0 = { + 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4}; + +static struct attn_hw_reg nig_prty1_bb_b0 = { + 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204}; + +static struct attn_hw_reg nig_prty2_bb_b0 = { + 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214}; + +static struct attn_hw_reg nig_prty3_bb_b0 = { + 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224}; + +static struct attn_hw_reg nig_prty4_bb_b0 = { + 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234}; + +static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { + &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, + &nig_prty3_bb_b0, &nig_prty4_bb_b0}; + 
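Each attn_hw_reg initializer above is positional and follows the field order declared at the top of this hunk: { reg_idx, num_of_bits, sts_addr, sts_clr_addr, sts_wr_addr, mask_addr }. A minimal sketch, assuming the qed_rd() and DP_INFO() helpers already used elsewhere in this patch, of how one block's interrupt registers could be walked and reported; the loop itself is illustrative and is not the dispatch code the patch adds.

/* Sketch only: report bits latched in a block's interrupt status
 * registers. Only the low num_of_bits bits of each STS register
 * are defined by the tables above.
 */
static void attn_dump_block_ints(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 const struct attn_hw_regs *regs)
{
	u16 i;

	for (i = 0; i < regs->num_of_int_regs; i++) {
		const struct attn_hw_reg *reg = regs->int_regs[i];
		u32 sts = qed_rd(p_hwfn, p_ptt, reg->sts_addr);
		u32 valid = reg->num_of_bits < 32 ?
			    (1U << reg->num_of_bits) - 1 : 0xffffffff;

		if (sts & valid)
			DP_INFO(p_hwfn->cdev,
				"int reg %u: status 0x%08x\n",
				reg->reg_idx, sts & valid);
	}
}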
+static struct attn_hw_reg ipc_int0_bb_b0 = { + 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510}; + +static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { + &ipc_int0_bb_b0}; + +static struct attn_hw_reg ipc_prty0_bb_b0 = { + 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520}; + +static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { + &ipc_prty0_bb_b0}; + +static struct attn_hw_block attn_blocks[] = { + {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } }, + {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } }, + {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } }, + {"dbu", {{0, 0, NULL, NULL} } }, + {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs, + pglue_b_prty_bb_b0_regs} } }, + {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } }, + {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } }, + {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } }, + {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } }, + {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } }, + {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } }, + {"mcp", {{0, 0, NULL, NULL} } }, + {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } }, + {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } }, + {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs, + pswhst2_prty_bb_b0_regs} } }, + {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } }, + {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } }, + {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } }, + {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } }, + {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } }, + {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } }, + {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } }, + {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } }, + {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } }, + {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } }, + {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } }, + {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } }, + {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } }, + {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } }, + {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } }, + {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } }, + {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } }, + {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } }, + {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } }, + {"src", {{1, 0, src_int_bb_b0_regs, NULL} } }, + {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } }, + {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } }, + {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } }, + {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } }, + {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } }, + {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } }, + {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } }, + {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } }, + {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } }, + {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } }, + {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } }, + {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } }, + {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } }, + {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } }, + {"tmld", {{1, 1, tmld_int_bb_b0_regs, 
tmld_prty_bb_b0_regs} } }, + {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } }, + {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } }, + {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } }, + {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } }, + {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs, + pbf_pb1_prty_bb_b0_regs} } }, + {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs, + pbf_pb2_prty_bb_b0_regs} } }, + {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } }, + {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } }, + {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } }, + {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } }, + {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } }, + {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } }, + {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } }, + {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } }, + {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } }, + {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } }, + {"umac", { {0, 0, NULL, NULL} } }, + {"xmac", { {0, 0, NULL, NULL} } }, + {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } }, + {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } }, + {"wol", { {0, 0, NULL, NULL} } }, + {"bmbn", { {0, 0, NULL, NULL} } }, + {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } }, + {"nwm", { {0, 0, NULL, NULL} } }, + {"nws", { {0, 0, NULL, NULL} } }, + {"ms", { {0, 0, NULL, NULL} } }, + {"phy_pcie", { {0, 0, NULL, NULL} } }, + {"misc_aeu", { {0, 0, NULL, NULL} } }, + {"bar0_map", { {0, 0, NULL, NULL} } },}; + +/* Specific HW attention callbacks */ +static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); + + /* This might occur on certain instances; Log it once then mask it */ + DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", + tmp); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, + 0xffffffff); + + return 0; +} + +#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) +static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_VALID); + + if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { + u32 addr, data, length; + + addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_ADDRESS); + data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_DATA); + length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_LENGTH); + + DP_INFO(p_hwfn->cdev, + "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", + addr, length, + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), + 
(u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_VF_VALID), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_CLIENT), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_BYTE_EN), + data); + } + + return 0; +} + +#define QED_GRC_ATTENTION_VALID_BIT (1 << 0) +#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) +#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) +#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) +#define QED_GRC_ATTENTION_MASTER_MASK (0xf) +#define QED_GRC_ATTENTION_MASTER_SHIFT (24) +#define QED_GRC_ATTENTION_PF_MASK (0xf) +#define QED_GRC_ATTENTION_PF_SHIFT (0) +#define QED_GRC_ATTENTION_VF_MASK (0xff) +#define QED_GRC_ATTENTION_VF_SHIFT (4) +#define QED_GRC_ATTENTION_PRIV_MASK (0x3) +#define QED_GRC_ATTENTION_PRIV_SHIFT (14) +#define QED_GRC_ATTENTION_PRIV_VF (0) +static const char *attn_master_to_str(u8 master) +{ + switch (master) { + case 1: return "PXP"; + case 2: return "MCP"; + case 3: return "MSDM"; + case 4: return "PSDM"; + case 5: return "YSDM"; + case 6: return "USDM"; + case 7: return "TSDM"; + case 8: return "XSDM"; + case 9: return "DBU"; + case 10: return "DMAE"; + default: + return "Unknown"; + } +} + +static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp, tmp2; + + /* We've already cleared the timeout interrupt register, so we learn + * of interrupts via the validity register + */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); + if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) + goto out; + + /* Read the GRC timeout information */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); + tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); + + DP_INFO(p_hwfn->cdev, + "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", + tmp2, tmp, + (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", + GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, + attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), + GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), + (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == + QED_GRC_ATTENTION_PRIV_VF) ?
"VF" : "(Ireelevant)", + GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); + +out: + /* Regardles of anything else, clean the validity bit */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); + return 0; +} + +#define PGLUE_ATTENTION_VALID (1 << 29) +#define PGLUE_ATTENTION_RD_VALID (1 << 26) +#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) +#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) +#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) +#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) +#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) +#define PGLUE_ATTENTION_ICPL_VALID (1 << 23) +#define PGLUE_ATTENTION_ZLR_VALID (1 << 25) +#define PGLUE_ATTENTION_ILT_VALID (1 << 23) +static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp; + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS2); + if (tmp & PGLUE_ATTENTION_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS); + + DP_INFO(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS2); + if (tmp & PGLUE_ATTENTION_RD_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS); + + DP_INFO(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 + : 0, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 + : 0); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) + DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & PGLUE_ATTENTION_ZLR_VALID) { + u32 addr_hi, addr_lo; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + if (tmp & PGLUE_ATTENTION_ILT_VALID) { + u32 addr_hi, addr_lo, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS); + + DP_INFO(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); + } + + /* Clear the indications */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + + return 0; +} + +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 reason; + + reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + if (reason) { + u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS); + + DP_INFO(p_hwfn->cdev, + "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", + qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS), + (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + reason); + } + + return -EINVAL; +} + +/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ +static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { + { + { /* After Invert 1 */ + {"GPIO0 function%d", + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 2 */ + {"PGLUE config_space", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, + qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | + (1 << ATTENTION_OFFSET_SHIFT), + NULL, MAX_BLOCK_ID}, + {"PCIE glue/PXP VPD %d", + (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, + } + }, + + { + { /* After Invert 3 */ + {"General Attention %d", + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 4 */ + {"General Attention 32", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"General Attention %d", + (2 << ATTENTION_LENGTH_SHIFT) | + (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, + {"General Attention 35", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), + NULL, BLOCK_CNIG}, + {"MCP CPU", ATTENTION_SINGLE, + qed_mcp_attn_cb, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, +
NULL, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"AVS stop status ready", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, + } + }, + + { + { /* After Invert 5 */ + {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, + } + }, + + { + { /* After Invert 6 */ + {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, + qed_dorq_attn_cb, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, + } + }, + + { + { /* After Invert 7 */ + {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, + {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, + {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, + } + }, + + { + { /* After Invert 8 */ + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, + qed_pswhst_attn_cb, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, + NULL, 
BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, + qed_grc_attn_cb, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, + {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, + NULL, BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PERST_B deassertion", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 9 */ + {"MCP Latched memory", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MCP Latched ump_tx", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + } + }, +}; + +#define ATTN_STATE_BITS (0xfff) #define ATTN_BITS_MASKABLE (0x3ff) struct qed_sb_attn_info { /* Virtual & Physical address of the SB */ struct atten_status_block *sb_attn; - dma_addr_t sb_phys; + dma_addr_t sb_phys; /* Last seen running index */ - u16 index; + u16 index; + + /* A mask of the AEU bits resulting in a parity error */ + u32 parity_mask[NUM_ATTN_REGS]; + + /* A pointer to the attention description structure */ + struct aeu_invert_reg *p_aeu_desc; /* Previously asserted attentions, which are still unasserted */ - u16 known_attn; + u16 known_attn; /* Cleanup address for the link's general hw attention */ - u32 mfw_attn_addr; + u32 mfw_attn_addr; }; static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, @@ -127,6 +1840,162 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn, return 0; } +static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn, + struct attn_hw_reg *p_reg_desc, + struct attn_hw_block *p_block, + enum qed_attention_type type, + u32 val, u32 mask) +{ + int j; + + for (j = 0; j < p_reg_desc->num_of_bits; j++) { + if (!(val & (1 << j))) + continue; + + DP_NOTICE(p_hwfn, + "%s (%s): reg %d [0x%08x], bit %d [%s]\n", + p_block->name, + type == QED_ATTN_TYPE_ATTN ? "Interrupt" : + "Parity", + p_reg_desc->reg_idx, p_reg_desc->sts_addr, + j, (mask & (1 << j)) ? " [MASKED]" : ""); + } +} + +/** + * @brief qed_int_deassertion_aeu_bit - handles the effects of a single + * cause of the attention + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the attention + * @param aeu_en_reg - register offset of the AEU enable reg. which configured + * this bit to this group. 
+ * @param bit_index - index of this bit in the aeu_en_reg + * + * @return int + */ +static int +qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, + u32 bitmask) +{ + int rc = -EINVAL; + u32 val; + + DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", + p_aeu->bit_name, bitmask); + + /* Call callback before clearing the interrupt status */ + if (p_aeu->cb) { + DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", + p_aeu->bit_name); + rc = p_aeu->cb(p_hwfn); + } + + /* Handle HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) { + struct attn_hw_block *p_block; + u32 mask; + int i; + + p_block = &attn_blocks[p_aeu->block_index]; + + /* Handle each interrupt register */ + for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 sts_addr; + + p_reg_desc = p_block->chip_regs[0].int_regs[i]; + + /* In case of fatal attention, don't clear the status + * so it would appear in following idle check. + */ + if (rc == 0) + sts_addr = p_reg_desc->sts_clr_addr; + else + sts_addr = p_reg_desc->sts_addr; + + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); + mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + QED_ATTN_TYPE_ATTN, + val, mask); + } + } + + /* If the attention is benign, no need to prevent it */ + if (!rc) + goto out; + + /* Prevent this Attention from being asserted in the future */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); + DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", + p_aeu->bit_name); + +out: + return rc; +} + +static void qed_int_parity_print(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + struct attn_hw_block *p_block, + u8 bit_index) +{ + int i; + + for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 val, mask; + + p_reg_desc = p_block->chip_regs[0].prty_regs[i]; + + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->sts_clr_addr); + mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + QED_ATTN_TYPE_PARITY, + val, mask); + } +} + +/** + * @brief qed_int_deassertion_parity - handle a single parity AEU source + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the parity + * @param bit_index + */ +static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u8 bit_index) +{ + u32 block_id = p_aeu->block_index; + + DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n", + p_aeu->bit_name, bit_index); + + if (block_id != MAX_BLOCK_ID) { + qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id], + bit_index); + + /* In BB, there's a single parity bit for several blocks */ + if (block_id == BLOCK_BTB) { + qed_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_OPTE], + bit_index); + qed_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_MCP], + bit_index); + } + } +} + /** * @brief - handles deassertion of previously asserted attentions. 
* @@ -139,17 +2008,108 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, u16 deasserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; - u32 aeu_mask; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; + u8 i, j, k, bit_idx; + int rc = 0; + + /* Read the attention registers in the AEU */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_AFTER_INVERT_1_IGU + + i * 0x4); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Deasserted bits [%d]: %08x\n", + i, aeu_inv_arr[i]); + } - if (deasserted_bits != 0x100) - DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n"); + /* Find parity attentions first */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; + u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32)); + u32 parities; + + /* Skip register in which no parity bit is currently set */ + parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; + if (!parities) + continue; + + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; + + if ((p_bit->flags & ATTENTION_PARITY) && + !!(parities & (1 << bit_idx))) + qed_int_deassertion_parity(p_hwfn, p_bit, + bit_idx); + + bit_idx += ATTENTION_LENGTH(p_bit->flags); + } + } + + /* Find non-parity cause for attention and act */ + for (k = 0; k < MAX_ATTN_GRPS; k++) { + struct aeu_invert_reg_bit *p_aeu; + + /* Handle only groups whose attention is currently deasserted */ + if (!(deasserted_bits & (1 << k))) + continue; + + for (i = 0; i < NUM_ATTN_REGS; i++) { + u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32) + + k * sizeof(u32) * NUM_ATTN_REGS; + u32 en, bits; + + en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + bits = aeu_inv_arr[i] & en; + + /* Skip if no bit from this group is currently set */ + if (!bits) + continue; + + /* Find all set bits from current register which belong + * to current group, making them responsible for the + * previous assertion. 
+ */ + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + u8 bit, bit_len; + u32 bitmask; + + p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; + + /* No need to handle parity-only bits */ + if (p_aeu->flags == ATTENTION_PAR) + continue; + + bit = bit_idx; + bit_len = ATTENTION_LENGTH(p_aeu->flags); + if (p_aeu->flags & ATTENTION_PAR_INT) { + /* Skip Parity */ + bit++; + bit_len--; + } + + bitmask = bits & (((1 << bit_len) - 1) << bit); + if (bitmask) { + /* Handle source of the attention */ + qed_int_deassertion_aeu_bit(p_hwfn, + p_aeu, + aeu_en, + bitmask); + } + + bit_idx += ATTENTION_LENGTH(p_aeu->flags); + } + } + } /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_IGU_CMD + - ((IGU_CMD_ATTN_BIT_CLR_UPPER - - IGU_CMD_INT_ACK_BASE) << 3), - ~((u32)deasserted_bits)); + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + ~((u32)deasserted_bits)); /* Unmask deasserted attentions in IGU */ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, @@ -160,7 +2120,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, /* Clear deassertion from inner state */ sb_attn_sw->known_attn &= ~deasserted_bits; - return 0; + return rc; } static int qed_int_attentions(struct qed_hwfn *p_hwfn) @@ -343,17 +2303,17 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) { - struct qed_dev *cdev = p_hwfn->cdev; - struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; - - if (p_sb) { - if (p_sb->sb_attn) - dma_free_coherent(&cdev->pdev->dev, - SB_ATTN_ALIGNED_SIZE(p_hwfn), - p_sb->sb_attn, - p_sb->sb_phys); - kfree(p_sb); - } + struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (!p_sb) + return; + + if (p_sb->sb_attn) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + p_sb->sb_attn, + p_sb->sb_phys); + kfree(p_sb); } static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, @@ -379,10 +2339,31 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + int i, j, k; sb_info->sb_attn = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; + /* Set the pointer to the AEU descriptors */ + sb_info->p_aeu_desc = aeu_descs; + + /* Calculate Parity Masks */ + memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); + for (i = 0; i < NUM_ATTN_REGS; i++) { + /* j is array index, k is bit index */ + for (j = 0, k = 0; k < 32; j++) { + unsigned int flags = aeu_descs[i].bits[j].flags; + + if (flags & ATTENTION_PARITY) + sb_info->parity_mask[i] |= 1 << k; + + k += ATTENTION_LENGTH(flags); + } + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Attn Mask [Reg %d]: 0x%08x\n", + i, sb_info->parity_mask[i]); + } + /* Set the address of cleanup for the mcp attention */ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0; @@ -399,7 +2380,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, dma_addr_t p_phys = 0; /* SB struct */ - p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) { DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n"); return -ENOMEM; @@ -433,6 +2414,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u16 vf_number, u8 vf_valid) { + struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); @@ -451,14 +2433,12 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, cau_state = CAU_HC_DISABLE_STATE; - if 
(p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { cau_state = CAU_HC_ENABLE_STATE; - if (!p_hwfn->cdev->rx_coalesce_usecs) - p_hwfn->cdev->rx_coalesce_usecs = - QED_CAU_DEF_RX_USECS; - if (!p_hwfn->cdev->tx_coalesce_usecs) - p_hwfn->cdev->tx_coalesce_usecs = - QED_CAU_DEF_TX_USECS; + if (!cdev->rx_coalesce_usecs) + cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; + if (!cdev->tx_coalesce_usecs) + cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); @@ -473,20 +2453,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid) { struct cau_sb_entry sb_entry; - u32 val; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, vf_number, vf_valid); if (p_hwfn->hw_init_done) { - val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64); - qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys)); - qed_wr(p_hwfn, p_ptt, val + sizeof(u32), - upper_32_bits(sb_phys)); - - val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64); - qed_wr(p_hwfn, p_ptt, val, sb_entry.data); - qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params); + /* Wide-bus, initialize via DMAE */ + u64 phys_addr = (u64)sb_phys; + + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, + CAU_REG_SB_ADDR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); } else { /* Initialize Status Block Address */ STORE_RT_REG_AGG(p_hwfn, @@ -638,8 +2618,10 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn, sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - p_hwfn->sbs_info[sb_id] = NULL; - p_hwfn->num_sbs--; + if (p_hwfn->sbs_info[sb_id] != NULL) { + p_hwfn->sbs_info[sb_id] = NULL; + p_hwfn->num_sbs--; + } return 0; } @@ -648,14 +2630,15 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) { struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; - if (p_sb) { - if (p_sb->sb_info.sb_virt) - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - SB_ALIGNED_SIZE(p_hwfn), - p_sb->sb_info.sb_virt, - p_sb->sb_info.sb_phys); - kfree(p_sb); - } + if (!p_sb) + return; + + if (p_sb->sb_info.sb_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys); + kfree(p_sb); } static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, @@ -666,7 +2649,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, void *p_virt; /* SB struct */ - p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); return -ENOMEM; @@ -692,25 +2675,6 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, return 0; } -static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - if (!p_hwfn) - return; - - if (p_hwfn->p_sp_sb) - qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); - else - DP_NOTICE(p_hwfn->cdev, - "Failed to setup Slow path status block - NULL pointer\n"); - - if (p_hwfn->p_sb_attn) - qed_int_sb_attn_setup(p_hwfn, p_ptt); - else - DP_NOTICE(p_hwfn->cdev, - "Failed to setup attentions status block - NULL pointer\n"); -} - int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, void *cookie, @@ -718,36 +2682,36 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; - int qed_status = -ENOMEM; + int rc = 
-ENOMEM; u8 pi; /* Look for a free index */ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { - if (!p_sp_sb->pi_info_arr[pi].comp_cb) { - p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; - p_sp_sb->pi_info_arr[pi].cookie = cookie; - *sb_idx = pi; - *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; - qed_status = 0; - break; - } + if (p_sp_sb->pi_info_arr[pi].comp_cb) + continue; + + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; + rc = 0; + break; } - return qed_status; + return rc; } int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; - int qed_status = -ENOMEM; - if (p_sp_sb->pi_info_arr[pi].comp_cb) { - p_sp_sb->pi_info_arr[pi].comp_cb = NULL; - p_sp_sb->pi_info_arr[pi].cookie = NULL; - qed_status = 0; - } + if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) + return -ENOMEM; + + p_sp_sb->pi_info_arr[pi].comp_cb = NULL; + p_sp_sb->pi_info_arr[pi].cookie = NULL; - return qed_status; + return 0; } u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) @@ -786,16 +2750,13 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { - int rc, i; - - /* Mask non-link attentions */ - for (i = 0; i < 9; i++) - qed_wr(p_hwfn, p_ptt, - MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); + int rc; - /* Configure AEU signal change to produce attentions for link */ + /* Configure AEU signal change to produce attentions */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); /* Flush the writes to IGU */ mmiowb(); @@ -937,6 +2898,39 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, } } +static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 sb_id) +{ + u32 val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * sb_id); + struct qed_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + + /* stop scanning when hit first invalid PF entry */ + if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && + GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) + goto out; + + /* Fill the block information */ + p_block->status = QED_IGU_STATUS_VALID; + p_block->function_id = GET_FIELD(val, + IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, + IGU_MAPPING_LINE_VECTOR_NUMBER); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", + sb_id, val, p_block->function_id, + p_block->is_pf, p_block->vector_number); + +out: + return val; +} + int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -946,7 +2940,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, u16 sb_id; u16 prev_sb_id = 0xFF; - p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC); + p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); if (!p_hwfn->hw_info.p_igu_info) return -ENOMEM; @@ -963,26 +2957,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, sb_id++) { blk = &p_igu_info->igu_map.igu_blocks[sb_id]; - val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + val = 
qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); /* stop scanning when hit first invalid PF entry */ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) break; - blk->status = QED_IGU_STATUS_VALID; - blk->function_id = GET_FIELD(val, - IGU_MAPPING_LINE_FUNCTION_NUMBER); - blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); - blk->vector_number = GET_FIELD(val, - IGU_MAPPING_LINE_VECTOR_NUMBER); - - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n", - val, blk->function_id, blk->is_pf, - blk->vector_number); - if (blk->is_pf) { if (blk->function_id == p_hwfn->rel_pf_id) { blk->status |= QED_IGU_STATUS_PF; @@ -1072,7 +3053,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) { - p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC); + p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); if (!p_hwfn->sp_dpc) return -ENOMEM; @@ -1117,22 +3098,22 @@ void qed_int_free(struct qed_hwfn *p_hwfn) void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_int_sp_sb_setup(p_hwfn, p_ptt); + qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + qed_int_sb_attn_setup(p_hwfn, p_ptt); qed_int_sp_dpc_setup(p_hwfn); } -int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, - int *p_iov_blks) +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info) { struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; - if (!info) - return 0; - - if (p_iov_blks) - *p_iov_blks = info->free_blks; + if (!info || !p_sb_cnt_info) + return; - return info->igu_sb_cnt; + p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; + p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; + p_sb_cnt_info->sb_free_blk = info->free_blks; } void qed_int_disable_post_isr_release(struct qed_dev *cdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 51e0b09a7f47..c57f2e680770 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -161,12 +161,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie); * blocks configured for this function in the igu.
* * @param p_hwfn - * @param p_iov_blks - configured free blks for vfs + * @param p_sb_cnt_info * * @return int - number of status blocks configured */ -int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, - int *p_iov_blks); +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info); /** * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index f72036a2ef5b..3f35c6ca9252 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -31,6 +31,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" @@ -124,52 +125,65 @@ struct qed_sp_vport_update_params { u8 update_vport_active_tx_flg; u8 vport_active_tx_flg; u8 update_approx_mcast_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; unsigned long bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; }; +enum qed_tpa_mode { + QED_TPA_MODE_NONE, + QED_TPA_MODE_UNUSED, + QED_TPA_MODE_GRO, + QED_TPA_MODE_MAX +}; + +struct qed_sp_vport_start_params { + enum qed_tpa_mode tpa_mode; + bool remove_inner_vlan; + bool drop_ttl0; + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; + u16 mtu; +}; + #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, - u32 concrete_fid, - u16 opaque_fid, - u8 vport_id, - u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg) + struct qed_sp_vport_start_params *p_params) { - struct qed_sp_init_request_params params; struct vport_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; u16 rx_mode = 0; u8 abs_vport_id = 0; - rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) return rc; - memset(¶ms, 0, sizeof(params)); - params.ramrod_data_size = sizeof(*p_ramrod); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_VPORT_START, - PROTOCOLID_ETH, - ¶ms); + PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.vport_start; p_ramrod->vport_id = abs_vport_id; - p_ramrod->mtu = cpu_to_le16(mtu); - p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg; - p_ramrod->drop_ttl0_en = drop_ttl0_flg; + p_ramrod->mtu = cpu_to_le16(p_params->mtu); + p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; + p_ramrod->drop_ttl0_en = p_params->drop_ttl0; SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); @@ -180,9 +194,26 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); + p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; + + switch (p_params->tpa_mode) { + case QED_TPA_MODE_GRO: + p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + p_ramrod->tpa_param.tpa_max_size = (u16)-1; + p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_ipv4_en_flg = 
1; + p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; + p_ramrod->tpa_param.tpa_pkt_split_flg = 1; + p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; + break; + default: + break; + } + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, - concrete_fid); + p_params->concrete_fid); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -360,7 +391,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, { struct qed_rss_params *p_rss_params = p_params->rss_params; struct vport_update_ramrod_data_cmn *p_cmn; - struct qed_sp_init_request_params sp_params; + struct qed_sp_init_data init_data; struct vport_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; u8 abs_vport_id = 0; @@ -370,17 +401,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, if (rc != 0) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - p_params->opaque_fid, ETH_RAMROD_VPORT_UPDATE, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -393,7 +422,9 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; p_cmn->tx_active_flg = p_params->vport_active_tx_flg; p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; - + p_cmn->accept_any_vlan = p_params->accept_any_vlan; + p_cmn->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ @@ -412,8 +443,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) { - struct qed_sp_init_request_params sp_params; struct vport_stop_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u8 abs_vport_id = 0; int rc; @@ -422,16 +453,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, if (rc != 0) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_VPORT_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -444,8 +473,10 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, static int qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; @@ -454,6 +485,8 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = vport; vport_update_params.accept_flags = accept_flags; + vport_update_params.update_accept_any_vlan_flg = 
update_accept_any_vlan; + vport_update_params.accept_any_vlan = accept_any_vlan; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -471,6 +504,10 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, "Accept filter configured, flags = [Rx]%x [Tx]%x\n", accept_flags.rx_accept_filter, accept_flags.tx_accept_filter); + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); } return 0; @@ -502,8 +539,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_rx_cid; u16 abs_rx_q_id = 0; u8 abs_vport_id = 0; @@ -528,15 +565,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, opaque_fid, cid, params->queue_id, params->vport_id, params->sb); - memset(&sp_params, 0, sizeof(params)); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; - sp_params.ramrod_data_size = sizeof(*p_ramrod); + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - cid, opaque_fid, ETH_RAMROD_RX_QUEUE_START, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -551,12 +588,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->complete_event_flg = 1; p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); - p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr); - p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr); + DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); - p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr); - p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); rc = qed_spq_post(p_hwfn, p_ent, NULL); @@ -628,21 +663,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, { struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; int rc = -EINVAL; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - p_rx_cid->cid, - p_rx_cid->opaque_fid, ETH_RAMROD_RX_QUEUE_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -680,8 +714,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, union qed_qm_pq_params *p_pq_params) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; u8 abs_vport_id; int rc = -EINVAL; @@ -696,15 +730,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, 
sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, &p_ent, cid, - opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_TX_QUEUE_START, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -714,11 +748,9 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(p_params->sb); p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id; - p_ramrod->tc = p_pq_params->eth.tc; p_ramrod->pbl_size = cpu_to_le16(pbl_size); - p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); - p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr); + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, @@ -785,20 +817,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) { struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_tx_cid->cid; + init_data.opaque_fid = p_tx_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - p_tx_cid->cid, - p_tx_cid->opaque_fid, ETH_RAMROD_TX_QUEUE_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -821,9 +852,8 @@ qed_filter_action(enum qed_filter_opcode opcode) case QED_FILTER_REMOVE: action = ETH_FILTER_ACTION_REMOVE; break; - case QED_FILTER_REPLACE: case QED_FILTER_FLUSH: - action = ETH_FILTER_ACTION_REPLACE; + action = ETH_FILTER_ACTION_REMOVE_ALL; break; default: action = MAX_ETH_FILTER_ACTION; @@ -856,9 +886,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, { u8 vport_to_add_to = 0, vport_to_remove_from = 0; struct vport_filter_update_ramrod_data *p_ramrod; - struct qed_sp_init_request_params sp_params; struct eth_filter_cmd *p_first_filter; struct eth_filter_cmd *p_second_filter; + struct qed_sp_init_data init_data; enum eth_filter_action action; int rc; @@ -872,17 +902,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, if (rc) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(**pp_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, pp_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_FILTERS_UPDATE, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -892,8 +921,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; switch (p_filter_cmd->opcode) { - case QED_FILTER_FLUSH: - p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break; + case QED_FILTER_REPLACE: case QED_FILTER_MOVE: p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; default: @@ -962,6 +990,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_second_filter->action = ETH_FILTER_ACTION_ADD; p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + memcpy(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; } else { action = qed_filter_action(p_filter_cmd->opcode); @@ -1101,8 +1135,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, { unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; u8 abs_vport_id = 0; int rc, i; @@ -1118,18 +1152,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, return rc; } - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, ETH_RAMROD_VPORT_UPDATE, - PROTOCOLID_ETH, - &sp_params); - + PROTOCOLID_ETH, &init_data); if (rc) { DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); return rc; @@ -1230,6 +1262,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, return rc; } +/* Statistics related code */ +static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); +} + +static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_pstorm_per_queue_stat pstats; + u32 pstats_addr = 0, pstats_len = 0; + + __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, + statistics_bin); + + memset(&pstats, 0, sizeof(pstats)); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, + pstats_addr, pstats_len); + + p_stats->tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->tx_bcast_bytes += + HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->tx_bcast_pkts += + HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); +} + +static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len) +{ + *p_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + *p_len = sizeof(struct tstorm_per_port_stat); +} + +static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + u32 tstats_addr = 0, tstats_len = 0; + struct 
tstorm_per_port_stat tstats; + + __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); + + memset(&tstats, 0, sizeof(tstats)); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, + tstats_addr, tstats_len); + + p_stats->mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); +} + +static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); +} + +static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_ustorm_per_queue_stat ustats; + u32 ustats_addr = 0, ustats_len = 0; + + __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, + statistics_bin); + + memset(&ustats, 0, sizeof(ustats)); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, + ustats_addr, ustats_len); + + p_stats->rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rx_ucast_pkts += + HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rx_mcast_pkts += + HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rx_bcast_pkts += + HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); +} + +static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_mstorm_per_queue_stat mstats; + u32 mstats_addr = 0, mstats_len = 0; + + __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, + statistics_bin); + + memset(&mstats, 0, sizeof(mstats)); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, + mstats_addr, mstats_len); + + p_stats->no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->ttl0_discard += + HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); +} + +static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) +{ + struct port_stats port_stats; + int j; + + memset(&port_stats, 0, sizeof(port_stats)); + + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, stats), + sizeof(port_stats)); + + p_stats->rx_64_byte_packets += port_stats.pmm.r64; + p_stats->rx_127_byte_packets += port_stats.pmm.r127; + p_stats->rx_255_byte_packets += port_stats.pmm.r255; + p_stats->rx_511_byte_packets += port_stats.pmm.r511; + p_stats->rx_1023_byte_packets += port_stats.pmm.r1023; + p_stats->rx_1518_byte_packets += port_stats.pmm.r1518; + p_stats->rx_1522_byte_packets += port_stats.pmm.r1522; + p_stats->rx_2047_byte_packets += port_stats.pmm.r2047; + 
p_stats->rx_4095_byte_packets += port_stats.pmm.r4095; + p_stats->rx_9216_byte_packets += port_stats.pmm.r9216; + p_stats->rx_16383_byte_packets += port_stats.pmm.r16383; + p_stats->rx_crc_errors += port_stats.pmm.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; + p_stats->rx_pause_frames += port_stats.pmm.rxpf; + p_stats->rx_pfc_frames += port_stats.pmm.rxpp; + p_stats->rx_align_errors += port_stats.pmm.raln; + p_stats->rx_carrier_errors += port_stats.pmm.rfcr; + p_stats->rx_oversize_packets += port_stats.pmm.rovr; + p_stats->rx_jabbers += port_stats.pmm.rjbr; + p_stats->rx_undersize_packets += port_stats.pmm.rund; + p_stats->rx_fragments += port_stats.pmm.rfrg; + p_stats->tx_64_byte_packets += port_stats.pmm.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; + p_stats->tx_pause_frames += port_stats.pmm.txpf; + p_stats->tx_pfc_frames += port_stats.pmm.txpp; + p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; + p_stats->tx_total_collisions += port_stats.pmm.tncl; + p_stats->rx_mac_bytes += port_stats.pmm.rbyte; + p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; + p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; + p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; + p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; + p_stats->tx_mac_bytes += port_stats.pmm.tbyte; + p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; + p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; + p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + for (j = 0; j < 8; j++) { + p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; + p_stats->brb_discards += port_stats.brb.brb_discard[j]; + } +} + +static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *stats, + u16 statistics_bin) +{ + __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); + + if (p_hwfn->mcp_info) + __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); +} + +static void _qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + u8 fw_vport = 0; + int i; + + memset(stats, 0, sizeof(*stats)); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + /* The main vport index is relative first */ + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + continue; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); + + qed_ptt_release(p_hwfn, p_ptt); + } +} + +void qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + u32 i; + + if (!cdev) { + memset(stats, 0, sizeof(*stats)); + return; + } + + _qed_get_vport_stats(cdev, stats); + + if (!cdev->reset_stats) + return; + + /* 
Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ +void qed_reset_vport_stats(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 addr = 0, len = 0; + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + memset(&mstats, 0, sizeof(mstats)); + __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); + + memset(&ustats, 0, sizeof(ustats)); + __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); + + memset(&pstats, 0, sizeof(pstats)); + __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); + + qed_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + */ + if (!cdev->reset_stats) + DP_INFO(cdev, "Reset stats not allocated\n"); + else + _qed_get_vport_stats(cdev, cdev->reset_stats); +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -1268,24 +1622,25 @@ static void qed_register_eth_ops(struct qed_dev *cdev, } static int qed_start_vport(struct qed_dev *cdev, - u8 vport_id, - u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg) + struct qed_start_vport_params *params) { int rc, i; for_each_hwfn(cdev, i) { + struct qed_sp_vport_start_params start = { 0 }; struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - rc = qed_sp_vport_start(p_hwfn, - p_hwfn->hw_info.concrete_fid, - p_hwfn->hw_info.opaque_fid, - vport_id, - mtu, - drop_ttl0_flg, - inner_vlan_removal_en_flg); - + start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO : + QED_TPA_MODE_NONE; + start.remove_inner_vlan = params->remove_inner_vlan; + start.drop_ttl0 = params->drop_ttl0; + start.opaque_fid = p_hwfn->hw_info.opaque_fid; + start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.vport_id = params->vport_id; + start.max_buffers_per_cqe = 16; + start.mtu = params->mtu; + + rc = qed_sp_vport_start(p_hwfn, &start); if (rc) { DP_ERR(cdev, "Failed to start VPORT\n"); return rc; @@ -1295,7 +1650,7 @@ static int qed_start_vport(struct qed_dev *cdev, DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started V-PORT %d with MTU %d\n", - vport_id, mtu); + start.vport_id, start.mtu); } qed_reset_vport_stats(cdev); @@ -1344,6 +1699,9 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.accept_any_vlan = params->accept_any_vlan; + sp_params.update_accept_any_vlan_flg = + params->update_accept_any_vlan_flg; /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. * We need to re-fix the rss values per engine for CMT. 
@@ -1563,7 +1921,7 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; - return qed_filter_accept_cmd(cdev, 0, accept_flags, + return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9d76ce249277..26d40db07ddd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -29,10 +29,10 @@ #include "qed_mcp.h" #include "qed_hw.h" -static const char version[] = - "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n"; +static char version[] = + "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; -MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define QED_FW_FILE_NAME \ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" +MODULE_FIRMWARE(QED_FW_FILE_NAME); + static int __init qed_init(void) { pr_notice("qed_init called\n"); @@ -97,12 +99,15 @@ static void qed_free_pci(struct qed_dev *cdev) pci_disable_device(pdev); } +#define PCI_REVISION_ID_ERROR_VAL 0xff + /* Performs PCI initializations as well as initializing PCI-related parameters * in the device structrue. Returns 0 in case of success. */ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) { + u8 rev_id; int rc; cdev->pdev = pdev; @@ -136,6 +141,14 @@ static int qed_init_pci(struct qed_dev *cdev, pci_save_state(pdev); } + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + if (rev_id == PCI_REVISION_ID_ERROR_VAL) { + DP_NOTICE(cdev, + "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", + rev_id); + rc = -ENODEV; + goto err2; + } if (!pci_is_pcie(pdev)) { DP_NOTICE(cdev, "The bus is not PCI Express\n"); rc = -EIO; @@ -190,7 +203,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->is_mf = IS_MF(&cdev->hwfns[0]); + dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); dev_info->fw_major = FW_MAJOR_VERSION; @@ -621,15 +634,18 @@ static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) static int qed_slowpath_setup_int(struct qed_dev *cdev, enum qed_int_mode int_mode) { - int rc, i; - u8 num_vectors = 0; - + struct qed_sb_cnt_info sb_cnt_info; + int rc; + int i; memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = int_mode; - for_each_hwfn(cdev, i) - num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1; - cdev->int_params.in.num_vectors = num_vectors; + for_each_hwfn(cdev, i) { + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); + cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors++; /* slowpath */ + } /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; @@ -763,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode, true, data); if (rc) - goto err3; + goto err2; DP_INFO(cdev, "HW initialization and function start completed successfully\n"); @@ -782,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev, return rc; } + qed_reset_vport_stats(cdev); + return 0; -err3: - qed_free_stream_mem(cdev); - qed_slowpath_irq_free(cdev); err2: + qed_hw_timers_stop_all(cdev); + qed_slowpath_irq_free(cdev); + qed_free_stream_mem(cdev); qed_disable_msix(cdev); err1: qed_resc_free(cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ba1b1f1ef789..b89c9a8e1655 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -11,8 +11,8 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/mutex.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/string.h> #include "qed.h" #include "qed_hsi.h" @@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, u32 size; /* Allocate mcp_info structure */ - p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC); + p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); if (!p_hwfn->mcp_info) goto err; p_info = p_hwfn->mcp_info; @@ -161,15 +161,15 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, } size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); - p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC); + p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); p_info->mfw_mb_shadow = kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( - p_info->mfw_mb_length), GFP_ATOMIC); + p_info->mfw_mb_length), GFP_KERNEL); if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; - /* Initialize the MFW mutex */ - mutex_init(&p_info->mutex); + /* Initialize the MFW spinlock */ + spin_lock_init(&p_info->lock); return 0; @@ -179,6 +179,52 @@ err: return -ENOMEM; } +/* Locks the MFW mailbox of a PF to ensure a single access. 
+ * The lock is achieved in most cases by holding a spinlock, causing other + * threads to wait till a previous access is done. + * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single + * access is achieved by setting a blocking flag, which will fail other + * competing contexts to send their mailboxes. + */ +static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, + u32 cmd) +{ + spin_lock_bh(&p_hwfn->mcp_info->lock); + + /* The spinlock shouldn't be acquired when the mailbox command is + * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel + * pending [UN]LOAD_REQ command of another PF together with a spinlock + * (i.e. interrupts are disabled) - can lead to a deadlock. + * It is assumed that for a single PF, no other mailbox commands can be + * sent from another context while sending LOAD_REQ, and that any + * parallel commands to UNLOAD_REQ can be cancelled. + */ + if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE) + p_hwfn->mcp_info->block_mb_sending = false; + + if (p_hwfn->mcp_info->block_mb_sending) { + DP_NOTICE(p_hwfn, + "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n", + cmd); + spin_unlock_bh(&p_hwfn->mcp_info->lock); + return -EBUSY; + } + + if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) { + p_hwfn->mcp_info->block_mb_sending = true; + spin_unlock_bh(&p_hwfn->mcp_info->lock); + } + + return 0; +} + +static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, + u32 cmd) +{ + if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) + spin_unlock_bh(&p_hwfn->mcp_info->lock); +} + int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -187,6 +233,13 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, u32 org_mcp_reset_seq, cnt = 0; int rc = 0; + /* Ensure that only a single thread is accessing the mailbox at a + * certain time. + */ + rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET); + if (rc != 0) + return rc; + /* Set drv command along with the updated sequence */ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, @@ -209,6 +262,8 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, rc = -EAGAIN; } + qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET); + return rc; } @@ -275,14 +330,12 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param) +static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params) { - int rc = 0; + u32 union_data_addr; + int rc; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -290,28 +343,56 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return -EBUSY; } - /* Lock Mutex to ensure only single thread is - * accessing the MCP at one time + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data); + + /* Ensure that only a single thread is accessing the mailbox at a + * certain time. 
*/ - mutex_lock(&p_hwfn->mcp_info->mutex); - rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, - o_mcp_resp, o_mcp_param); - /* Release Mutex */ - mutex_unlock(&p_hwfn->mcp_info->mutex); + rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd); + if (rc) + return rc; + + if (p_mb_params->p_data_src != NULL) + qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, + p_mb_params->p_data_src, + sizeof(*p_mb_params->p_data_src)); + + rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd, + p_mb_params->param, &p_mb_params->mcp_resp, + &p_mb_params->mcp_param); + + if (p_mb_params->p_data_dst != NULL) + qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, + union_data_addr, + sizeof(*p_mb_params->p_data_dst)); + + qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd); return rc; } -static void qed_mcp_set_drv_ver(struct qed_dev *cdev, - struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) { - u32 i; + struct qed_mcp_mb_params mb_params; + int rc; - /* Copy version string to MCP */ - for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++) - DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i], - *(u32 *)&cdev->ver_str[i * sizeof(u32)]); + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + return 0; } int qed_mcp_load_req(struct qed_hwfn *p_hwfn, @@ -319,26 +400,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, u32 *p_load_code) { struct qed_dev *cdev = p_hwfn->cdev; - u32 param; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; int rc; - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } - - /* Save driver's version to shmem */ - qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt); - - DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", - p_hwfn->mcp_info->drv_mb_seq, - p_hwfn->mcp_info->drv_pulse_seq); - + memset(&mb_params, 0, sizeof(mb_params)); /* Load Request */ - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ, - (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | - cdev->drv_type), - p_load_code, ¶m); + mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; + mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | + cdev->drv_type; + memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc) { @@ -346,6 +419,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return rc; } + *p_load_code = mb_params.mcp_resp; + /* If MFW refused (e.g. other port is in diagnostic mode) we * must abort. 
This can happen in the following cases: * - Other port is in diagnostic mode @@ -365,6 +440,33 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 transceiver_state; + + transceiver_state = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_HW | QED_MSG_SP), + "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", + transceiver_state, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data))); + + transceiver_state = GET_FIELD(transceiver_state, + PMM_TRANSCEIVER_STATE); + + if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) + DP_NOTICE(p_hwfn, "Transceiver is present.\n"); + else + DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) @@ -390,7 +492,10 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, return; } - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + if (p_hwfn->b_drv_link_init) + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + else + p_link->link_up = false; p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -492,53 +597,43 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; - u32 param = 0, reply = 0, cmd; - struct pmm_phy_cfg phy_cfg; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + struct pmm_phy_cfg *phy_cfg; int rc = 0; - u32 i; - - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } + u32 cmd; /* Set the shmem configuration according to params */ - memset(&phy_cfg, 0, sizeof(phy_cfg)); + phy_cfg = &union_data.drv_phy_cfg; + memset(phy_cfg, 0, sizeof(*phy_cfg)); cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) - phy_cfg.speed = params->speed.forced_speed; - phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; - phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; - phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; - phy_cfg.adv_speed = params->speed.advertised_speeds; - phy_cfg.loopback_mode = params->loopback_mode; - - /* Write the requested configuration to shmem */ - for (i = 0; i < sizeof(phy_cfg); i += 4) - qed_wr(p_hwfn, p_ptt, - p_hwfn->mcp_info->drv_mb_addr + - offsetof(struct public_drv_mb, union_data) + i, - ((u32 *)&phy_cfg)[i >> 2]); + phy_cfg->speed = params->speed.forced_speed; + phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; + phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; + phy_cfg->pause |= (params->pause.forced_tx) ? 
PMM_PAUSE_TX : 0; + phy_cfg->adv_speed = params->speed.advertised_speeds; + phy_cfg->loopback_mode = params->loopback_mode; + + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg.speed, - phy_cfg.pause, - phy_cfg.adv_speed, - phy_cfg.loopback_mode, - phy_cfg.feature_config_flags); + phy_cfg->speed, + phy_cfg->pause, + phy_cfg->adv_speed, + phy_cfg->loopback_mode, + phy_cfg->feature_config_flags); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } - DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", - p_hwfn->mcp_info->drv_mb_seq, - p_hwfn->mcp_info->drv_pulse_seq); - - /* Load Request */ - rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, ¶m); + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc) { @@ -581,6 +676,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); break; + case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: + qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); + break; default: DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; @@ -720,26 +818,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return -EINVAL; } - if (p_hwfn->cdev->mf_mode != SF) { - info->bandwidth_min = (shmem_info.config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - info->bandwidth_min); - info->bandwidth_min = 1; - } - info->bandwidth_max = (shmem_info.config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - info->bandwidth_max); - info->bandwidth_max = 100; - } + info->bandwidth_min = (shmem_info.config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + info->bandwidth_min); + info->bandwidth_min = 1; + } + + info->bandwidth_max = (shmem_info.config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", + info->bandwidth_max); + info->bandwidth_max = 100; } if (shmem_info.mac_upper || shmem_info.mac_lower) { @@ -802,11 +899,11 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn, int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NIG_DRAIN, 100, + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); /* Wait for the drain to complete before returning */ - msleep(120); + msleep(1020); return rc; } @@ -832,31 +929,28 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver) { - int rc = 0; - u32 param = 0, reply = 0, i; - - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } + struct drv_version_stc *p_drv_version; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + __be32 val; + u32 i; + int rc; - DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version, - p_ver->version); - /* Copy version string to shmem */ - for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) { - DRV_MB_WR(p_hwfn, p_ptt, - union_data.drv_version.name[i * sizeof(u32)], - *(u32 *)&p_ver->name[i * sizeof(u32)]); + p_drv_version = &union_data.drv_version; + p_drv_version->version = p_ver->version; + for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { + val = cpu_to_be32(p_ver->name[i]); + *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; } - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply, - ¶m); - if (rc) { + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VERSION; + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); - return rc; - } - return 0; + return rc; } int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 506197d5c3dd..50917a2131a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -11,8 +11,8 @@ #include <linux/types.h> #include <linux/delay.h> -#include <linux/mutex.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include "qed_hsi.h" struct qed_mcp_link_speed_params { @@ -255,7 +255,8 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ ((_p_hwfn)->cdev->num_ports_in_engines * 2)) struct qed_mcp_info { - struct mutex mutex; /* MCP access lock */ + spinlock_t lock; + bool block_mb_sending; u32 public_base; u32 drv_mb_addr; u32 mfw_mb_addr; @@ -272,6 +273,15 @@ struct qed_mcp_info { u16 mcp_hist; }; +struct qed_mcp_mb_params { + u32 cmd; + u32 param; + union drv_union_data *p_data_src; + union drv_union_data *p_data_dst; + u32 mcp_resp; + u32 mcp_param; +}; + /** * @brief Initialize the interface with the MCP * diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index e8df12335a97..c15b1622e636 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -127,8 +127,20 @@ 0x00c000UL #define DORQ_REG_IFEN \ 0x100040UL +#define DORQ_REG_DB_DROP_REASON \ + 0x100a2cUL +#define DORQ_REG_DB_DROP_DETAILS \ + 0x100a24UL +#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \ + 0x100a1cUL #define GRC_REG_TIMEOUT_EN \ 0x050404UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \ + 0x050054UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \ + 0x05004cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \ + 0x050050UL #define 
IGU_REG_BLOCK_CONFIGURATION \ 0x180040UL #define MCM_REG_INIT \ @@ -155,6 +167,40 @@ 0x1100000UL #define PGLUE_B_REG_ADMIN_PER_PF_REGION \ 0x2a9000UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \ + 0x2aa150UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \ + 0x2aa144UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \ + 0x2aa148UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS \ + 0x2aa14cUL +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \ + 0x2aa154UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \ + 0x2aa158UL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS \ + 0x2aa15cUL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \ + 0x2aa160UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \ + 0x2aa164UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \ + 0x2aa54cUL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \ + 0x2aa544UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \ + 0x2aa548UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \ + 0x2aae74UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \ + 0x2aae78UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \ + 0x2aae7cUL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \ + 0x2aae80UL +#define PGLUE_B_REG_LATCHED_ERRORS_CLR \ + 0x2aa3bcUL #define PRM_REG_DISABLE_PRM \ 0x230000UL #define PRS_REG_SOFT_RST \ @@ -171,6 +217,14 @@ 0x2a0040UL #define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ 0x29e050UL +#define PSWHST_REG_INCORRECT_ACCESS_VALID \ + 0x2a0070UL +#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \ + 0x2a0074UL +#define PSWHST_REG_INCORRECT_ACCESS_DATA \ + 0x2a0068UL +#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \ + 0x2a006cUL #define PSWRD_REG_DBG_SELECT \ 0x29c040UL #define PSWRD2_REG_CONF11 \ @@ -333,6 +387,8 @@ 0x180800UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL +#define MISC_REG_AEU_AFTER_INVERT_1_IGU \ + 0x0087b4UL #define MISC_REG_AEU_MASK_ATTN_IGU \ 0x008494UL #define IGU_REG_CLEANUP_STATUS_0 \ @@ -363,6 +419,10 @@ 0x7 << 0) #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 0 +#define MCP_REG_CPU_STATE \ + 0xe05004UL +#define MCP_REG_CPU_EVENT_MASK \ + 0xe05008UL #define PGLUE_B_REG_PF_BAR0_SIZE \ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 287fadfab52d..d39f914b66ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn, #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 -struct qed_sp_init_request_params { - size_t ramrod_data_size; +struct qed_sp_init_data { + u32 cid; + u16 opaque_fid; + + /* Information regarding operation upon sending & completion */ enum spq_mode comp_mode; struct qed_spq_comp_cb *p_comp_data; }; int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u32 cid, - u16 opaque_fid, u8 cmd, u8 protocol, - struct qed_sp_init_request_params *p_params); + struct qed_sp_init_data *p_data); /** * @brief qed_sp_pf_start - PF Function Start Ramrod @@ -343,7 +344,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode); + enum qed_mf_mode mode); /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 6f7879136633..1c06c37d4c3d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -23,15 +23,13 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u32 cid, - u16 opaque_fid, u8 cmd, u8 
protocol, - struct qed_sp_init_request_params *p_params) + struct qed_sp_init_data *p_data) { - int rc = -EINVAL; + u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; struct qed_spq_entry *p_ent = NULL; - u32 opaque_cid = opaque_fid << 16 | cid; + int rc; if (!pp_ent) return -ENOMEM; @@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, p_ent->elem.hdr.protocol_id = protocol; p_ent->priority = QED_SPQ_PRIORITY_NORMAL; - p_ent->comp_mode = p_params->comp_mode; + p_ent->comp_mode = p_data->comp_mode; p_ent->comp_done.done = 0; switch (p_ent->comp_mode) { @@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, break; case QED_SPQ_MODE_BLOCK: - if (!p_params->p_comp_data) + if (!p_data->p_comp_data) return -EINVAL; - p_ent->comp_cb.cookie = p_params->p_comp_data->cookie; + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; case QED_SPQ_MODE_CB: - if (!p_params->p_comp_data) + if (!p_data->p_comp_data) p_ent->comp_cb.function = NULL; else - p_ent->comp_cb = *p_params->p_comp_data; + p_ent->comp_cb = *p_data->p_comp_data; break; default: @@ -83,37 +81,35 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); - if (p_params->ramrod_data_size) - memset(&p_ent->ramrod, 0, p_params->ramrod_data_size); + + memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode) + enum qed_mf_mode mode) { - struct qed_sp_init_request_params params; struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); u8 sb_index = p_hwfn->p_eq->eq_sb_index; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); - memset(¶ms, 0, sizeof(params)); - params.ramrod_data_size = sizeof(*p_ramrod); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, - &p_ent, - qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_START, PROTOCOLID_COMMON, - ¶ms); + &init_data); if (rc) return rc; @@ -125,26 +121,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); p_ramrod->mf_mode = mode; + switch (mode) { + case QED_MF_DEFAULT: + case QED_MF_NPAR: + p_ramrod->mf_mode = MF_NPAR; + break; + case QED_MF_OVLAN: + p_ramrod->mf_mode = MF_OVLAN; + break; + default: + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + p_ramrod->mf_mode = MF_NPAR; + } p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; /* Place EQ address in RAMROD */ - p_ramrod->event_ring_pbl_addr.hi = - DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); - p_ramrod->event_ring_pbl_addr.lo = - DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); + DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, + p_hwfn->p_eq->chain.pbl.p_phys_table); p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; - p_ramrod->consolid_q_pbl_addr.hi = - DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); - p_ramrod->consolid_q_pbl_addr.lo = - DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + p_hwfn->p_consq->chain.pbl.p_phys_table); 
p_hwfn->hw_info.personality = PERSONALITY_ETH; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n", + "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", sb, sb_index, - (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf", p_ramrod->outer_tag); return qed_spq_post(p_hwfn, p_ent, NULL); @@ -152,17 +155,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { - struct qed_sp_init_request_params params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; - memset(¶ms, 0, sizeof(params)); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, - ¶ms); + &init_data); if (rc) return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 3dd548ab8df1..89469d5aae25 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt->xstorm_st_context.spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr); - p_cxt->xstorm_st_context.consolid_base_addr.lo = - DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); - p_cxt->xstorm_st_context.consolid_base_addr.hi = - DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr); + DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, + p_hwfn->p_consq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, @@ -327,7 +325,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq; /* Allocate EQ struct */ - p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC); + p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n"); return NULL; @@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_virt = p_spq->p_virt; for (i = 0; i < p_spq->chain.capacity; i++) { - p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys); - p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys); + DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); list_add_tail(&p_virt->list, &p_spq->free_pool); @@ -457,7 +454,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) /* SPQ struct */ p_spq = - kzalloc(sizeof(struct qed_spq), GFP_ATOMIC); + kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); return -ENOMEM; @@ -853,7 +850,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) struct qed_consq *p_consq; /* Allocate ConsQ struct */ - p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC); + p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n"); return NULL; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 7c6caf7f6612..d023251544d9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -24,7 +24,7 @@ #include <linux/qed/qed_eth_if.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 4 +#define QEDE_MINOR_VERSION 7 #define QEDE_REVISION_VERSION 0 #define QEDE_ENGINEERING_VERSION 0 #define DRV_MODULE_VERSION 
__stringify(QEDE_MAJOR_VERSION) "." \ @@ -100,6 +100,12 @@ struct qede_stats { u64 tx_mac_ctrl_frames; }; +struct qede_vlan { + struct list_head list; + u16 vid; + bool configured; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -154,6 +160,11 @@ struct qede_dev { u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ + bool gro_disable; + struct list_head vlan_list; + u16 configured_vlans; + u16 non_configured_vlans; + bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; }; @@ -173,9 +184,27 @@ enum QEDE_STATE { * skb are built only after the frame was DMA-ed. */ struct sw_rx_data { - u8 *data; + struct page *data; + dma_addr_t mapping; + unsigned int page_offset; +}; - DEFINE_DMA_UNMAP_ADDR(mapping); +enum qede_agg_state { + QEDE_AGG_STATE_NONE = 0, + QEDE_AGG_STATE_START = 1, + QEDE_AGG_STATE_ERROR = 2 +}; + +struct qede_agg_info { + struct sw_rx_data replace_buf; + dma_addr_t replace_buf_mapping; + struct sw_rx_data start_buf; + dma_addr_t start_buf_mapping; + struct eth_fast_path_rx_tpa_start_cqe start_cqe; + enum qede_agg_state agg_state; + struct sk_buff *skb; + int frag_id; + u16 vlan_tag; }; struct qede_rx_queue { @@ -187,7 +216,11 @@ struct qede_rx_queue { struct qed_chain rx_comp_ring; void __iomem *hw_rxq_prod_addr; + /* GRO */ + struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; + int rx_buf_size; + unsigned int rx_buf_seg_size; u16 num_rx_buffers; u16 rxq_id; @@ -281,6 +314,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev); #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define QEDE_RX_HDR_SIZE 256 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e442b85c9a5e..c49dc10ce151 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct qed_link_params params; u32 speed; - if (edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, - "Link parameters can not be changed in MF mode\n"); + "Link parameters can not be changed in non-default mode\n"); return -EOPNOTSUPP; } @@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev, struct qed_link_params params; struct qed_link_output current_link; - if (!edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, "Pause parameters can not be updated in non-default mode\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6237f10b5119..518af329502d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -39,10 +39,10 @@ #include "qede.h" -static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede " - DRV_MODULE_VERSION "\n"; +static char version[] = + "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; -MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -53,7 +53,7 @@ MODULE_PARM_DESC(debug, " Default debug msglevel"); static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_40 0x1634 -#define CHIP_NUM_57980S_10 0x1635 
+#define CHIP_NUM_57980S_10 0x1666 #define CHIP_NUM_57980S_MF 0x1636 #define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_50 0x1654 @@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, struct eth_tx_3rd_bd *third_bd) { u8 l4_proto; - u16 bd2_bits = 0, bd2_bits2 = 0; + u16 bd2_bits1 = 0, bd2_bits2 = 0; - bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) @@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, l4_proto = ip_hdr(skb)->protocol; if (l4_proto == IPPROTO_UDP) - bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - if (third_bd) { + if (third_bd) third_bd->data.bitfields |= - ((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT; - } + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - second_bd->data.bitfields = cpu_to_le16(bd2_bits); + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); } @@ -381,6 +380,28 @@ static int map_frag_to_bd(struct qede_dev *edev, return 0; } +/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) +static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, + u8 xmit_type) +{ + int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; + + if (xmit_type & XMIT_LSO) { + int hlen; + + hlen = skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data; + + /* linear payload would require its own BD */ + if (skb_headlen(skb) > hlen) + allowed_frags--; + } + + return (skb_shinfo(skb)->nr_frags > allowed_frags); +} +#endif + /* Main transmit function */ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, @@ -408,16 +429,22 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, txq = QEDE_TX_QUEUE(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); - /* Current code doesn't support SKB linearization, since the max number - * of skb frags can be passed in the FW HSI. 
- */ - BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET); - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) + if (qede_pkt_req_lin(edev, skb, xmit_type)) { + if (skb_linearize(skb)) { + DP_NOTICE(edev, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; txq->sw_tx_ring[idx].skb = skb; @@ -464,12 +491,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { + u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; + /* We don't re-calculate IP checksum as it is already done by * the upper stack */ first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + first_bd->data.bitfields |= cpu_to_le16(temp); + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -491,7 +522,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= - (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); /* Make life easier for FW guys who can't deal with header and * data on same BD. If we need to split, use the second bd... @@ -719,26 +750,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp) return false; } -/* This function copies the Rx buffer from the CONS position to the PROD - * position, since we failed to allocate a new Rx buffer. 
+/* This function reuses the buffer(from an offset) from + * consumer index to producer index in the bd ring */ -static void qede_reuse_rx_data(struct qede_rx_queue *rxq) +static inline void qede_reuse_page(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) { - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *sw_rx_data_cons = - &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - struct sw_rx_data *sw_rx_data_prod = - &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + struct sw_rx_data *curr_prod; + dma_addr_t new_mapping; - dma_unmap_addr_set(sw_rx_data_prod, mapping, - dma_unmap_addr(sw_rx_data_cons, mapping)); + curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + *curr_prod = *curr_cons; - sw_rx_data_prod->data = sw_rx_data_cons->data; - memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); + new_mapping = curr_prod->mapping + curr_prod->page_offset; + + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - rxq->sw_rx_cons++; rxq->sw_rx_prod++; + curr_cons->data = NULL; +} + +static inline int qede_realloc_rx_buffer(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + /* Move to the next segment in the page */ + curr_cons->page_offset += rxq->rx_buf_seg_size; + + if (curr_cons->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + return -ENOMEM; + + dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + } else { + /* Increment refcount of the page as we don't want + * network stack to take the ownership of the page + * which can be recycled multiple times by the driver. 
+ */ + atomic_inc(&curr_cons->data->_count); + qede_reuse_page(edev, rxq, curr_cons); + } + + return 0; } static inline void qede_update_rx_prod(struct qede_dev *edev, @@ -809,6 +866,281 @@ static inline void qede_skb_receive(struct qede_dev *edev, napi_gro_receive(&fp->napi, skb); } +static void qede_set_gro_params(struct qede_dev *edev, + struct sk_buff *skb, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); + + if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & + PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - + cqe->header_len; +} + +static int qede_fill_frag_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + u8 tpa_agg_index, + u16 len_on_bd) +{ + struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & + NUM_RX_BDS_MAX]; + struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; + struct sk_buff *skb = tpa_info->skb; + + if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + goto out; + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, tpa_info->frag_id++, + current_bd->data, current_bd->page_offset, + len_on_bd); + + if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + goto out; + } + + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; + + skb->data_len += len_on_bd; + skb->truesize += rxq->rx_buf_seg_size; + skb->len += len_on_bd; + + return 0; + +out: + return -ENOMEM; +} + +static void qede_tpa_start(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + dma_addr_t mapping = tpa_info->replace_buf_mapping; + struct sw_rx_data *sw_rx_data_cons; + struct sw_rx_data *sw_rx_data_prod; + enum pkt_hash_types rxhash_type; + u32 rxhash; + + sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + + /* Use pre-allocated replacement buffer - we can't release the agg. + * start until its over and we don't want to risk allocation failing + * here, so re-allocate when aggregation will be over. + */ + dma_unmap_addr_set(sw_rx_data_prod, mapping, + dma_unmap_addr(replace_buf, mapping)); + + sw_rx_data_prod->data = replace_buf->data; + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + sw_rx_data_prod->page_offset = replace_buf->page_offset; + + rxq->sw_rx_prod++; + + /* move partial skb from cons to pool (don't unmap yet) + * save mapping, incase we drop the packet later on. 
+ */ + tpa_info->start_buf = *sw_rx_data_cons; + mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), + le32_to_cpu(rx_bd_cons->addr.lo)); + + tpa_info->start_buf_mapping = mapping; + rxq->sw_rx_cons++; + + /* set tpa state to start only if we are able to allocate skb + * for this aggregation, otherwise mark as error and aggregation will + * be dropped + */ + tpa_info->skb = netdev_alloc_skb(edev->ndev, + le16_to_cpu(cqe->len_on_first_bd)); + if (unlikely(!tpa_info->skb)) { + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + return; + } + + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); + memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe)); + + /* Start filling in the aggregation info */ + tpa_info->frag_id = 0; + tpa_info->agg_state = QEDE_AGG_STATE_START; + + rxhash = qede_get_rxhash(edev, cqe->bitfields, + cqe->rss_hash, &rxhash_type); + skb_set_hash(tpa_info->skb, rxhash, rxhash_type); + if ((le16_to_cpu(cqe->pars_flags.flags) >> + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + else + tpa_info->vlan_tag = 0; + + /* This is needed in order to enable forwarding support */ + qede_set_gro_params(edev, tpa_info->skb, cqe); + + if (likely(cqe->ext_bd_len_list[0])) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->ext_bd_len_list[0])); + + if (unlikely(cqe->ext_bd_len_list[1])) { + DP_ERR(edev, + "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + } +} + +#ifdef CONFIG_INET +static void qede_gro_ip_csum(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + + tcp_gro_complete(skb); +} + +static void qede_gro_ipv6_csum(struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); + tcp_gro_complete(skb); +} +#endif + +static void qede_gro_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + switch (skb->protocol) { + case htons(ETH_P_IP): + qede_gro_ip_csum(skb); + break; + case htons(ETH_P_IPV6): + qede_gro_ipv6_csum(skb); + break; + default: + DP_ERR(edev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + ntohs(skb->protocol)); + } + } +#endif + skb_record_rx_queue(skb, fp->rss_id); + qede_skb_receive(edev, fp, skb, vlan_tag); +} + +static inline void qede_tpa_cont(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) +{ + int i; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA cont with more than a single len_list entry\n"); +} + +static void qede_tpa_end(struct qede_dev *edev, + struct qede_fastpath *fp, + struct eth_fast_path_rx_tpa_end_cqe *cqe) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_agg_info *tpa_info; + struct sk_buff *skb; + int i; + + tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + skb = 
tpa_info->skb; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA emd with more than a single len_list entry\n"); + + if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + goto err; + + /* Sanity */ + if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) + DP_ERR(edev, + "Strange - TPA had %02x BDs, but SKB has only %d frags\n", + cqe->num_of_bds, tpa_info->frag_id); + if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) + DP_ERR(edev, + "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", + le16_to_cpu(cqe->total_packet_len), skb->len); + + memcpy(skb->data, + page_address(tpa_info->start_buf.data) + + tpa_info->start_cqe.placement_offset + + tpa_info->start_buf.page_offset, + le16_to_cpu(tpa_info->start_cqe.len_on_first_bd)); + + /* Recycle [mapped] start buffer for the next replacement */ + tpa_info->replace_buf = tpa_info->start_buf; + tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; + + /* Finalize the SKB */ + skb->protocol = eth_type_trans(skb, edev->ndev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); + + qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); + + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + + return; +err: + /* The BD starting the aggregation is still mapped; Re-use it for + * future aggregations [as replacement buffer] + */ + memcpy(&tpa_info->replace_buf, &tpa_info->start_buf, + sizeof(struct sw_rx_data)); + tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; + tpa_info->start_buf.data = NULL; + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + dev_kfree_skb_any(tpa_info->skb); + tpa_info->skb = NULL; +} + static u8 qede_check_csum(u16 flag) { u16 csum_flag = 0; @@ -857,9 +1189,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; struct sk_buff *skb; + struct page *data; + __le16 flags; u16 len, pad; u32 rx_hash; - u8 *data; /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *) @@ -873,62 +1206,135 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) goto next_cqe; } + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { + switch (cqe_type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, + &cqe->fast_path_tpa_start); + goto next_cqe; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, + &cqe->fast_path_tpa_cont); + goto next_cqe; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, fp, + &cqe->fast_path_tpa_end); + goto next_rx_only; + default: + break; + } + } + /* Get the data from the SW ring */ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; data = sw_rx_data->data; fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->pkt_len); + len = le16_to_cpu(fp_cqe->len_on_first_bd); pad = fp_cqe->placement_offset; + flags = cqe->fast_path_regular.pars_flags.flags; - /* For every Rx BD consumed, we allocate a new BD so the BD ring - * is always with a fixed size. If allocation fails, we take the - * consumed BD and return it to the ring in the PROD position. - * The packet that was received on that BD will be dropped (and - * not passed to the upper stack). 
- */ - if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) { - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(sw_rx_data, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); - - /* If this is an error packet then drop it */ - parse_flag = - le16_to_cpu(cqe->fast_path_regular.pars_flags.flags); - csum_flag = qede_check_csum(parse_flag); - if (csum_flag == QEDE_CSUM_ERROR) { - DP_NOTICE(edev, - "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", - sw_comp_cons, parse_flag); - rxq->rx_hw_errors++; - kfree(data); - goto next_rx; - } - - skb = build_skb(data, 0); + /* If this is an error packet then drop it */ + parse_flag = le16_to_cpu(flags); - if (unlikely(!skb)) { - DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); - kfree(data); - rxq->rx_alloc_errors++; - goto next_rx; - } - - skb_reserve(skb, pad); + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + DP_NOTICE(edev, + "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", + sw_comp_cons, parse_flag); + rxq->rx_hw_errors++; + qede_reuse_page(edev, rxq, sw_rx_data); + goto next_rx; + } - } else { + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) { DP_NOTICE(edev, - "New buffer allocation failed, dropping incoming packet and reusing its buffer\n"); - qede_reuse_rx_data(rxq); + "Build_skb failed, dropping incoming packet\n"); + qede_reuse_page(edev, rxq, sw_rx_data); rxq->rx_alloc_errors++; - goto next_cqe; + goto next_rx; } - sw_rx_data->data = NULL; + /* Copy data into SKB */ + if (len + pad <= QEDE_RX_HDR_SIZE) { + memcpy(skb_put(skb, len), + page_address(data) + pad + + sw_rx_data->page_offset, len); + qede_reuse_page(edev, rxq, sw_rx_data); + } else { + struct skb_frag_struct *frag; + unsigned int pull_len; + unsigned char *va; + + frag = &skb_shinfo(skb)->frags[0]; - skb_put(skb, len); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data, + pad + sw_rx_data->page_offset, + len, rxq->rx_buf_seg_size); + + va = skb_frag_address(frag); + pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); + + /* Align the pull_len to optimize memcpy */ + memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); + + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + if (unlikely(qede_realloc_rx_buffer(edev, rxq, + sw_rx_data))) { + DP_ERR(edev, "Failed to allocate rx buffer\n"); + rxq->rx_alloc_errors++; + goto next_cqe; + } + } + + if (fp_cqe->bd_num != 1) { + u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); + u8 num_frags; + + pkt_len -= len; + + for (num_frags = fp_cqe->bd_num - 1; num_frags > 0; + num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? 
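/* Editor's sketch (illustration only, not part of the patch): the
 * copy-break / header-pull scheme used in the new RX path above.  Small
 * frames are copied whole into the skb head; larger frames keep the payload
 * in the page fragment and only the protocol headers, sized with the
 * two-argument eth_get_headlen() of this kernel era, are pulled into the
 * linear area.  EX_HDR_SIZE stands in for QEDE_RX_HDR_SIZE and the direct
 * frag->page_offset access mirrors the driver code of this era.
 */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define EX_HDR_SIZE 256

static struct sk_buff *example_build_rx_skb(struct net_device *ndev,
                                            struct page *page,
                                            unsigned int offset,
                                            unsigned int len,
                                            unsigned int truesize)
{
        struct sk_buff *skb = netdev_alloc_skb(ndev, EX_HDR_SIZE);
        skb_frag_t *frag;
        unsigned int pull_len;
        unsigned char *va;

        if (!skb)
                return NULL;

        if (len <= EX_HDR_SIZE) {
                /* Copy-break: the whole frame fits in the linear area,
                 * so the page can be recycled by the caller.
                 */
                memcpy(skb_put(skb, len), page_address(page) + offset, len);
                return skb;
        }

        /* Keep the payload in the page; pull only the headers */
        skb_add_rx_frag(skb, 0, page, offset, len, truesize);

        frag = &skb_shinfo(skb)->frags[0];
        va = skb_frag_address(frag);
        pull_len = eth_get_headlen(va, EX_HDR_SIZE);

        memcpy(skb->data, va, pull_len);
        skb_frag_size_sub(frag, pull_len);
        frag->page_offset += pull_len;
        skb->data_len -= pull_len;
        skb->tail += pull_len;

        return skb;
}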
+ rxq->rx_buf_size : pkt_len; + + WARN_ONCE(!cur_size, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + goto next_cqe; + + rxq->sw_rx_cons++; + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + qed_chain_consume(&rxq->rx_bd_ring); + dma_unmap_page(&edev->pdev->dev, + sw_rx_data->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, + skb_shinfo(skb)->nr_frags++, + sw_rx_data->data, 0, + cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (pkt_len) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + } skb->protocol = eth_type_trans(skb, edev->ndev); @@ -945,9 +1351,9 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); qed_chain_consume(&rxq->rx_bd_ring); - next_rx: rxq->sw_rx_cons++; +next_rx_only: rx_pkt++; next_cqe: /* don't consume bd rx buffer */ @@ -1056,6 +1462,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev, return edev->ops->filter_config(edev->cdev, &filter_cmd); } +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + void qede_fill_by_demand_stats(struct qede_dev *edev) { struct qed_eth_stats stats; @@ -1168,6 +1589,247 @@ static struct rtnl_link_stats64 *qede_get_stats64( return stats; } +static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return; + + memset(¶ms, 0, sizeof(params)); + + params.vport_id = 0; + params.accept_any_vlan = action; + params.update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, ¶ms); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? 
"enabled" : "disabled"); + edev->accept_any_vlan = action; + } +} + +static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + + return 0; + } + + /* Check for the filter limit. + * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + return -EINVAL; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, true); + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + + return 0; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +static int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we have more VLANs than credits, + * or remove accept_any_vlan mode if we've actually removed + * a non-configured vlan, and all remaining vlans are truly configured. 
+ */ + + if (accept_any_vlan) + qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, false); + + return real_rc; +} + +static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + return 0; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + return 0; + } + + /* Remove vlan */ + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + return -EINVAL; + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + + return rc; +} + +static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", + vlan->vid); + } + + edev->accept_any_vlan = false; +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1176,6 +1838,8 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, }; @@ -1220,6 +1884,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->num_tc = edev->dev_info.num_tc; + INIT_LIST_HEAD(&edev->vlan_list); + return edev; } @@ -1251,7 +1917,7 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX; ndev->hw_features = hw_features; @@ -1566,23 +2232,45 @@ static void qede_free_rx_buffers(struct qede_dev *edev, for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { struct sw_rx_data *rx_buf; - u8 *data; + struct page *data; rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; data = rx_buf->data; - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); + dma_unmap_page(&edev->pdev->dev, + rx_buf->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; - kfree(data); + __free_page(data); + } +} + +static void qede_free_sge_mem(struct qede_dev *edev, + struct qede_rx_queue *rxq) { + int i; + + if (edev->gro_disable) + return; + + for (i = 0; i < 
ETH_TPA_MAX_AGGS_NUM; i++) { + struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + + if (replace_buf) { + dma_unmap_page(&edev->pdev->dev, + dma_unmap_addr(replace_buf, mapping), + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(replace_buf->data); + } } } static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { + qede_free_sge_mem(edev, rxq); + /* Free rx buffers */ qede_free_rx_buffers(edev, rxq); @@ -1600,29 +2288,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t mapping; + struct page *data; u16 rx_buf_size; - u8 *data; rx_buf_size = rxq->rx_buf_size; - data = kmalloc(rx_buf_size, GFP_ATOMIC); + data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { - DP_NOTICE(edev, "Failed to allocate Rx data\n"); + DP_NOTICE(edev, "Failed to allocate Rx data [page]\n"); return -ENOMEM; } - mapping = dma_map_single(&edev->pdev->dev, data, - rx_buf_size, DMA_FROM_DEVICE); + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. + */ + mapping = dma_map_page(&edev->pdev->dev, data, 0, + PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - kfree(data); + __free_page(data); DP_NOTICE(edev, "Failed to map Rx buffer\n"); return -ENOMEM; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; sw_rx_data->data = data; - - dma_unmap_addr_set(sw_rx_data, mapping, mapping); + sw_rx_data->mapping = mapping; /* Advance PROD and get BD pointer */ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); @@ -1635,6 +2326,53 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, return 0; } +static int qede_alloc_sge_mem(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + dma_addr_t mapping; + int i; + + if (edev->gro_disable) + return 0; + + if (edev->ndev->mtu > PAGE_SIZE) { + edev->gro_disable = 1; + return 0; + } + + for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { + struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + + replace_buf->data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!replace_buf->data)) { + DP_NOTICE(edev, + "Failed to allocate TPA skb pool [replacement buffer]\n"); + goto err; + } + + mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, + rxq->rx_buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + DP_NOTICE(edev, + "Failed to map TPA replacement buffer\n"); + goto err; + } + + dma_unmap_addr_set(replace_buf, mapping, mapping); + tpa_info->replace_buf.page_offset = 0; + + tpa_info->replace_buf_mapping = mapping; + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + } + + return 0; +err: + qede_free_sge_mem(edev, rxq); + edev->gro_disable = 1; + return -ENOMEM; +} + /* This function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) @@ -1643,13 +2381,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + - ETH_OVERHEAD + - edev->ndev->mtu + - QEDE_FW_RX_ALIGN_END; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE; + + /* Segment size to spilt a page in multiple equal parts */ + rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); /* Allocate the parallel 
driver ring for Rx buffers */ - size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX; + size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); if (!rxq->sw_rx_ring) { DP_ERR(edev, "Rx buffers ring allocation failed\n"); @@ -1660,7 +2401,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@ -1671,7 +2412,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); if (rc) @@ -1693,6 +2434,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, num_allocated); } + qede_alloc_sge_mem(edev, rxq); + return 0; err: @@ -1855,6 +2598,8 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, rss_id); } + + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); } static int qede_set_real_num_queues(struct qede_dev *edev) @@ -2088,11 +2833,12 @@ static int qede_stop_queues(struct qede_dev *edev) static int qede_start_queues(struct qede_dev *edev) { int rc, tc, i; - int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1; + int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; struct qed_update_vport_rss_params *rss_params = &edev->rss_params; struct qed_update_vport_params vport_update_params; struct qed_queue_start_common_params q_params; + struct qed_start_vport_params start = {0}; if (!edev->num_rss) { DP_ERR(edev, @@ -2100,10 +2846,13 @@ static int qede_start_queues(struct qede_dev *edev) return -EINVAL; } - rc = edev->ops->vport_start(cdev, vport_id, - edev->ndev->mtu, - drop_ttl0_flg, - vlan_removal_en); + start.gro_enable = !edev->gro_disable; + start.mtu = edev->ndev->mtu; + start.vport_id = 0; + start.drop_ttl0 = true; + start.remove_inner_vlan = vlan_removal_en; + + rc = edev->ops->vport_start(cdev, &start); if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", rc); @@ -2112,7 +2861,7 @@ static int qede_start_queues(struct qede_dev *edev) DP_VERBOSE(edev, NETIF_MSG_IFUP, "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", - vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); + start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); for_each_rss(i) { struct qede_fastpath *fp = &edev->fp_array[i]; @@ -2177,7 +2926,7 @@ static int qede_start_queues(struct qede_dev *edev) /* Prepare and send the vport enable */ memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = vport_id; + vport_update_params.vport_id = start.vport_id; vport_update_params.update_vport_active_flg = 1; vport_update_params.vport_active_flg = 1; @@ -2252,6 +3001,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) DP_INFO(edev, "Stopped Queues\n"); + qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); /* Release the interrupts */ @@ -2320,6 +3070,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) edev->state = QEDE_STATE_OPEN; mutex_unlock(&edev->qede_lock); + /* Program un-configured VLANs */ + qede_configure_vlan_filters(edev); + /* Ask for link-up using current configuration */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; @@ -2398,13 +3151,17 @@ static void qede_link_update(void *dev, struct 
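/* Editor's sketch (illustration only): the page-based RX buffer scheme
 * introduced above.  A whole page is allocated and DMA-mapped once;
 * rx_buf_seg_size = roundup_pow_of_two(rx_buf_size) then lets the page be
 * carved into equal segments addressed by page_offset, so one mapping
 * backs several RX buffers.  The struct below is a simplified stand-in
 * for struct sw_rx_data.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct example_rx_buf {
        struct page *page;
        dma_addr_t mapping;
        unsigned int page_offset;
};

static int example_alloc_rx_page(struct device *dev,
                                 struct example_rx_buf *buf)
{
        buf->page = alloc_page(GFP_ATOMIC);
        if (!buf->page)
                return -ENOMEM;

        /* Map the whole page; segments are addressed via page_offset */
        buf->mapping = dma_map_page(dev, buf->page, 0, PAGE_SIZE,
                                    DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, buf->mapping)) {
                __free_page(buf->page);
                return -ENOMEM;
        }

        buf->page_offset = 0;
        return 0;
}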
qed_link_output *link) } if (link->link_up) { - DP_NOTICE(edev, "Link is up\n"); - netif_tx_start_all_queues(edev->ndev); - netif_carrier_on(edev->ndev); + if (!netif_carrier_ok(edev->ndev)) { + DP_NOTICE(edev, "Link is up\n"); + netif_tx_start_all_queues(edev->ndev); + netif_carrier_on(edev->ndev); + } } else { - DP_NOTICE(edev, "Link is down\n"); - netif_tx_disable(edev->ndev); - netif_carrier_off(edev->ndev); + if (netif_carrier_ok(edev->ndev)) { + DP_NOTICE(edev, "Link is down\n"); + netif_tx_disable(edev->ndev); + netif_carrier_off(edev->ndev); + } } } @@ -2580,6 +3337,17 @@ static void qede_config_rx_mode(struct net_device *ndev) goto out; } + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. + */ + qede_config_accept_any_vlan(edev, false); + } + rx_mode.filter.accept_flags = accept_flags; edev->ops->filter_config(edev->cdev, &rx_mode); out: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 46bbea8e023c..55007f1e6bbc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -566,6 +566,7 @@ struct qlcnic_adapter_stats { u64 tx_dma_map_error; u64 spurious_intr; u64 mac_filter_limit_overrun; + u64 mbx_spurious_intr; }; /* @@ -1099,7 +1100,7 @@ struct qlcnic_mailbox { unsigned long status; spinlock_t queue_lock; /* Mailbox queue lock */ spinlock_t aen_lock; /* Mailbox response/AEN lock */ - atomic_t rsp_status; + u32 rsp_status; u32 num_cmds; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 37a731be7d39..f9640d5ce6ba 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) { - atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; complete(&mbx->completion); } @@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { - if (atomic_read(&mbx->rsp_status) != rsp_status) + if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } out: @@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { - if (atomic_read(&mbx->rsp_status) != rsp_status) + if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } } @@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) { + u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_adapter *adapter = data; struct qlcnic_mailbox *mbx; - u32 mask, resp, event; unsigned long flags; mbx = adapter->ahw->mailbox; @@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if (event & QLCNIC_MBX_ASYNC_EVENT) { 
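/* Editor's sketch (illustration only): the carrier-transition guard added
 * to qede_link_update() above.  netif_carrier_ok() reflects the state last
 * programmed, so acting only when it disagrees with the new report avoids
 * duplicate "Link is up/down" messages and redundant queue start/stop.
 */
#include <linux/netdevice.h>

static void example_link_update(struct net_device *ndev, bool link_up)
{
        if (link_up && !netif_carrier_ok(ndev)) {
                netdev_info(ndev, "Link is up\n");
                netif_tx_start_all_queues(ndev);
                netif_carrier_on(ndev);
        } else if (!link_up && netif_carrier_ok(ndev)) {
                netdev_info(ndev, "Link is down\n");
                netif_tx_disable(ndev);
                netif_carrier_off(ndev);
        }
}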
__qlcnic_83xx_process_aen(adapter); - else - qlcnic_83xx_notify_mbx_response(mbx); + } else { + if (mbx->rsp_status != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + else + adapter->stats.mbx_spurious_intr++; + } out: mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); @@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) struct qlcnic_adapter *adapter = mbx->adapter; const struct qlcnic_mbx_ops *mbx_ops = mbx->ops; struct device *dev = &adapter->pdev->dev; - atomic_t *rsp_status = &mbx->rsp_status; struct list_head *head = &mbx->cmd_q; struct qlcnic_hardware_context *ahw; struct qlcnic_cmd_args *cmd = NULL; + unsigned long flags; ahw = adapter->ahw; @@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) return; } - atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + spin_lock_irqsave(&mbx->aen_lock, flags); + mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; + spin_unlock_irqrestore(&mbx->aen_lock, flags); spin_lock(&mbx->queue_lock); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 494e8105adee..0a2318cad34d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = { QLC_OFF(stats.mac_filter_limit_overrun)}, {"spurious intr", QLC_SIZEOF(stats.spurious_intr), QLC_OFF(stats.spurious_intr)}, - + {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr), + QLC_OFF(stats.mbx_spurious_intr)}, }; static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 997976426799..b28e73ea2c25 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, return; } skb_reserve(new_skb, NET_IP_ALIGN); + + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + memcpy(skb_put(new_skb, length), skb->data, length); + + pci_dma_sync_single_for_device(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); skb = new_skb; /* Frame error, so drop the packet. 
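/* Editor's sketch (illustration only): the sync-copy-sync pattern added to
 * ql_process_mac_rx_skb() above, shown with the generic DMA API (the patch
 * itself uses the pci_dma_* wrappers).  When the CPU peeks at a buffer that
 * stays DMA-mapped for device reuse, it must claim it with sync_for_cpu
 * first and hand it back with sync_for_device afterwards, or it may read
 * stale data on non-coherent platforms.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_copy_break(struct device *dev, struct sk_buff *dst,
                               const void *src, unsigned int len,
                               dma_addr_t mapping, size_t map_len)
{
        dma_sync_single_for_cpu(dev, mapping, map_len, DMA_FROM_DEVICE);
        memcpy(skb_put(dst, len), src, len);
        dma_sync_single_for_device(dev, mapping, map_len, DMA_FROM_DEVICE);
}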
*/ diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 689a4a5c8dcf..1ef03939d25f 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev) dev->netdev_ops = &qcaspi_netdev_ops; qcaspi_set_ethtool_ops(dev); dev->watchdog_timeo = QCASPI_TX_TIMEOUT; - dev->flags = IFF_MULTICAST; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->tx_queue_len = 100; qca = netdev_priv(dev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 17d5571d0432..94f08f1e841c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1999,7 +1999,8 @@ static int rtl8169_set_speed(struct net_device *dev, goto out; if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full)) { + (advertising & ADVERTISED_1000baseT_Full) && + !pci_is_pcie(tp->pci_dev)) { mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); } out: @@ -4933,8 +4934,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: @@ -4943,8 +4942,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: @@ -6137,28 +6134,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) sw_cnt_1ms_ini = 16000000/rg_saw_cnt; sw_cnt_1ms_ini &= 0x0fff; data = r8168_mac_ocp_read(tp, 0xd412); - data &= 0x0fff; + data &= ~0x0fff; data |= sw_cnt_1ms_ini; r8168_mac_ocp_write(tp, 0xd412, data); } data = r8168_mac_ocp_read(tp, 0xe056); - data &= 0xf0; - data |= 0x07; + data &= ~0xf0; + data |= 0x70; r8168_mac_ocp_write(tp, 0xe056, data); data = r8168_mac_ocp_read(tp, 0xe052); - data &= 0x8008; - data |= 0x6000; + data &= ~0x6000; + data |= 0x8008; r8168_mac_ocp_write(tp, 0xe052, data); data = r8168_mac_ocp_read(tp, 0xe0d6); - data &= 0x01ff; + data &= ~0x01ff; data |= 0x017f; r8168_mac_ocp_write(tp, 0xe0d6, data); data = r8168_mac_ocp_read(tp, 0xd420); - data &= 0x0fff; + data &= ~0x0fff; data |= 0x047f; r8168_mac_ocp_write(tp, 0xd420, data); @@ -7730,10 +7727,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; struct rtl8169_counters *counters = tp->counters; unsigned int start; - if (netif_running(dev)) + pm_runtime_get_noresume(&pdev->dev); + + if (netif_running(dev) && pm_runtime_active(&pdev->dev)) rtl8169_rx_missed(dev, ioaddr); do { @@ -7761,7 +7761,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) * Fetch additonal counter values missing in stats collected by driver * from tally counters. */ - rtl8169_update_counters(dev); + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(dev); /* * Subtract values fetched during initalization. 
@@ -7774,6 +7775,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - le16_to_cpu(tp->tc_offset.tx_aborted); + pm_runtime_put_noidle(&pdev->dev); + return stats; } @@ -7853,6 +7856,10 @@ static int rtl8169_runtime_suspend(struct device *device) rtl8169_net_suspend(dev); + /* Update counters before going runtime suspend */ + rtl8169_rx_missed(dev, tp->mmio_addr); + rtl8169_update_counters(dev); + return 0; } diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 270c4c9cac7f..4f132cf177cd 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_RENESAS config SH_ETH tristate "Renesas SuperH Ethernet support" depends on HAS_DMA - depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST + depends on ARCH_RENESAS || SUPERH || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG @@ -32,7 +32,7 @@ config SH_ETH config RAVB tristate "Renesas Ethernet AVB support" depends on HAS_DMA - depends on ARCH_SHMOBILE || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9fbe92ac225b..b2160d1b9c71 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> * * Based on the SuperH Ethernet driver * @@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data, iowrite32(data, priv->addr + reg); } +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set); int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); irqreturn_t ravb_ptp_interrupt(struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ac43ed914fcf..4e1a7dba7c4a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
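/* Editor's sketch (illustration only): the runtime-PM guard added to
 * rtl8169_get_stats64() above.  pm_runtime_get_noresume() pins the current
 * RPM state without waking the device; hardware counters are then read only
 * if the device is actually active, and the reference is dropped with
 * pm_runtime_put_noidle().  read_hw_counters() is a hypothetical callback.
 */
#include <linux/pm_runtime.h>

static void example_read_stats(struct device *dev,
                               void (*read_hw_counters)(struct device *dev))
{
        pm_runtime_get_noresume(dev);

        if (pm_runtime_active(dev))
                read_hw_counters(dev);  /* safe: device is powered */

        pm_runtime_put_noidle(dev);
}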
<source@cogentembedded.com> * * Based on the SuperH Ethernet driver * @@ -42,6 +42,12 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set) +{ + ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); +} + int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) { int i; @@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev) int error; /* Set config mode */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Check if the operating mode is changed to the config mode */ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); if (error) @@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev) static void ravb_set_duplex(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr = ravb_read(ndev, ECMR); - if (priv->duplex) /* Full */ - ecmr |= ECMR_DM; - else /* Half */ - ecmr &= ~ECMR_DM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); } static void ravb_set_rate(struct net_device *ndev) @@ -92,8 +92,6 @@ static void ravb_set_rate(struct net_device *ndev) case 1000: /* 1000BASE */ ravb_write(ndev, GECMR_SPEED_1000, GECMR); break; - default: - break; } } @@ -131,13 +129,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) { struct ravb_private *priv = container_of(ctrl, struct ravb_private, mdiobb); - u32 pir = ravb_read(priv->ndev, PIR); - if (set) - pir |= mask; - else - pir &= ~mask; - ravb_write(priv->ndev, pir, PIR); + ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); } /* MDC pin control */ @@ -393,9 +386,9 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_ring_format(ndev, RAVB_NC); #if defined(__LITTLE_ENDIAN) - ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, 0); #else - ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); #endif /* Set AVB RX */ @@ -418,8 +411,7 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); /* Setting the control will start the AVB-DMAC process. 
*/ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); return 0; } @@ -493,7 +485,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) break; } } - ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR); + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); } } @@ -613,13 +605,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) static void ravb_rcv_snd_disable(struct net_device *ndev) { /* Disable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void ravb_rcv_snd_enable(struct net_device *ndev) { /* Enable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* function for waiting dma process finished */ @@ -765,8 +757,8 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } - if (iss & ISS_CGIS) - result = ravb_ptp_interrupt(ndev); + if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + result = IRQ_HANDLED; mmiowb(); spin_unlock(&priv->lock); @@ -812,8 +804,8 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0); - ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC); + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -852,8 +844,7 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_set_rate(ndev); } if (!priv->link) { - ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; if (priv->no_avb_link) @@ -1139,7 +1130,8 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); if (error) { @@ -1170,7 +1162,8 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); } @@ -1298,7 +1291,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Wait for DMA stopping */ ravb_stop_dma(ndev); @@ -1311,7 +1305,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); } @@ -1393,7 +1388,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc--; desc->die_dt = DT_FSTART; - ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); priv->cur_tx[q] += NUM_TX_DESC; if (priv->cur_tx[q] - priv->dirty_tx[q] > @@ -1468,15 +1463,10 @@ static void ravb_set_rx_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 ecmr; spin_lock_irqsave(&priv->lock, flags); - ecmr = 
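/* Editor's sketch (illustration only): the general shape of the
 * ravb_modify()/sh_eth_modify() helpers introduced above.  A single
 * (clear, set) read-modify-write replaces the repeated open-coded
 * read / AND / OR / write sequences; passing the same mask for both
 * arguments sets the bits, and (mask, 0) clears them.
 */
#include <linux/io.h>

static void example_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
        writel((readl(reg) & ~clear) | set, reg);
}

/* Typical uses, mirroring the conversions in the patch:
 *   example_reg_modify(reg, ECMR_RE | ECMR_TE, 0);                 disable
 *   example_reg_modify(reg, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); enable
 *   example_reg_modify(reg, ECMR_DM, full_duplex ? ECMR_DM : 0);
 */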
ravb_read(ndev, ECMR); - if (ndev->flags & IFF_PROMISC) - ecmr |= ECMR_PRM; - else - ecmr &= ~ECMR_PRM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_PRM, + ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1718,7 +1708,6 @@ static int ravb_set_gti(struct net_device *ndev) static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct ravb_private *priv; enum ravb_chip_id chip_id; struct net_device *ndev; @@ -1750,8 +1739,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->base_addr = res->start; ndev->dma = -1; - match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev); - chip_id = (enum ravb_chip_id)match->data; + chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); if (chip_id == RCAR_GEN3) irq = platform_get_irq_byname(pdev, "ch22"); @@ -1804,27 +1792,21 @@ static int ravb_probe(struct platform_device *pdev) /* Set AVB config mode */ if (chip_id == RCAR_GEN2) { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | - CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | + CCC_GAC | CCC_CSEL_HPB); } - /* Set CSEL value */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, - CCC); - /* Set GTI value */ error = ravb_set_gti(ndev); if (error) goto out_release; /* Request GTI loading */ - ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 7a8ce920c49e..57992ccc4657 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -2,7 +2,7 @@ * * Copyright (C) 2013-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request) if (error) return error; - ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR); + ravb_modify(ndev, GCCR, request, request); return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); } @@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, ptp.info); struct net_device *ndev = priv->ndev; unsigned long flags; - u32 gic; if (req->index) return -EINVAL; @@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - gic = ravb_read(ndev, GIC); - if (on) - gic |= GIC_PTCE; - else - gic &= ~GIC_PTCE; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
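/* Editor's sketch (illustration only): the of_device_get_match_data()
 * conversion used in ravb_probe() above (and in sh_eth_drv_probe() further
 * down).  The helper returns the .data pointer of the matched OF table
 * entry directly, replacing the explicit of_match_device() lookup.
 * example_chip_id is a hypothetical stand-in for the driver's chip enum.
 */
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum example_chip_id { EXAMPLE_GEN2, EXAMPLE_GEN3 };

static int example_probe(struct platform_device *pdev)
{
        enum example_chip_id chip_id;

        /* Was: match = of_match_device(table, &pdev->dev); match->data */
        chip_id = (enum example_chip_id)(uintptr_t)
                        of_device_get_match_data(&pdev->dev);

        if (chip_id == EXAMPLE_GEN3)
                dev_info(&pdev->dev, "Gen3 variant\n");

        return 0;
}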
GIC_PTCE : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, struct ravb_ptp_perout *perout; unsigned long flags; int error = 0; - u32 gic; if (req->index) return -EINVAL; @@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - gic = ravb_read(ndev, GIC); - gic |= GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - gic = ravb_read(ndev, GIC); - gic &= ~GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, 0); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 gccr; priv->ptp.info = ravb_ptp_info; @@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) spin_lock_irqsave(&priv->lock, flags); ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); - gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS; - ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR); + ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index dfa9e59c9442..004e2d7560fd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3,7 +3,7 @@ * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2014 Cogent Embedded, Inc. + * Copyright (C) 2013-2016 Cogent Embedded, Inc. * Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index) return ioread32(mdp->addr + offset); } +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, + u32 set) +{ + sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set, + enum_index); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -440,8 +447,8 @@ static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) static void sh_eth_select_mii(struct net_device *ndev) { - u32 value = 0x0; struct sh_eth_private *mdp = netdev_priv(ndev); + u32 value; switch (mdp->phy_interface) { case PHY_INTERFACE_MODE_GMII: @@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? 
ECMR_DM : 0); } static void sh_eth_chip_reset(struct net_device *ndev) @@ -496,8 +500,6 @@ static void sh_eth_set_rate_gether(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, GECMR_1000, GECMR); break; - default: - break; } } @@ -583,12 +585,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_ELB, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); - break; - default: + sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); break; } } @@ -649,12 +649,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RTM, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); - break; - default: + sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); break; } } @@ -694,8 +692,6 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev) case 100:/* 100BASE */ sh_eth_write(ndev, 1, RTRATE); break; - default: - break; } } @@ -763,8 +759,6 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, 0x00000020, GECMR); break; - default: - break; } } @@ -924,8 +918,7 @@ static int sh_eth_reset(struct net_device *ndev) if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER); ret = sh_eth_check_reset(ndev); if (ret) @@ -949,11 +942,9 @@ static int sh_eth_reset(struct net_device *ndev) if (mdp->cd->select_mii) sh_eth_select_mii(ndev); } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER); mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0); } return ret; @@ -1136,11 +1127,8 @@ static void sh_eth_ring_format(struct net_device *ndev) break; sh_eth_set_receive_align(skb); - /* RX descriptor */ - rxdesc = &mdp->rx_ring[i]; /* The size of the buffer is a multiple of 32 bytes. */ buf_len = ALIGN(mdp->rx_buf_sz, 32); - rxdesc->len = cpu_to_le32(buf_len << 16); dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) { @@ -1148,6 +1136,10 @@ static void sh_eth_ring_format(struct net_device *ndev) break; } mdp->rx_skbuff[i] = skb; + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + rxdesc->len = cpu_to_le32(buf_len << 16); rxdesc->addr = cpu_to_le32(dma_addr); rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); @@ -1163,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev) mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); /* Mark the last entry as wrapping the ring. 
*/ - rxdesc->status |= cpu_to_le32(RD_RDLE); + if (rxdesc) + rxdesc->status |= cpu_to_le32(RD_RDLE); memset(mdp->tx_ring, 0, tx_ringsize); @@ -1238,8 +1231,8 @@ ring_free: static int sh_eth_dev_init(struct net_device *ndev, bool start) { - int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; /* Soft Reset */ ret = sh_eth_reset(ndev); @@ -1285,7 +1278,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); - sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + sh_eth_modify(ndev, EESR, 0, 0); if (start) { mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); @@ -1319,8 +1312,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (start) { /* Setting the Rx mode will start the Rx process. */ sh_eth_write(ndev, EDRRR_R, EDRRR); - - netif_start_queue(ndev); } return ret; @@ -1362,7 +1353,7 @@ static int sh_eth_txfree(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_txdesc *txdesc; int free_num = 0; - int entry = 0; + int entry; for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { entry = mdp->dirty_tx % mdp->num_tx_ring; @@ -1403,10 +1394,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; int limit; struct sk_buff *skb; - u16 pkt_len = 0; u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; + u16 pkt_len; u32 buf_len; boguscnt = min(boguscnt, *quota); @@ -1532,15 +1523,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) static void sh_eth_rcv_snd_disable(struct net_device *ndev) { /* disable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & - ~(ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void sh_eth_rcv_snd_enable(struct net_device *ndev) { /* enable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | - (ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* error control function */ @@ -1569,13 +1558,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & - ~DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); /* clear int */ - sh_eth_write(ndev, sh_eth_read(ndev, ECSR), - ECSR); - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | - DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, + DMAC_M_ECI); /* enable tx and rx */ sh_eth_rcv_snd_enable(ndev); } @@ -1765,9 +1752,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) mdp->cd->set_rate(ndev); } if (!mdp->link) { - sh_eth_write(ndev, - sh_eth_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; if (mdp->cd->no_psr || mdp->no_ether_link) @@ -1791,7 +1776,7 @@ static int sh_eth_phy_init(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = NULL; + struct phy_device *phydev; mdp->link = 0; mdp->speed = 0; @@ -2245,8 +2230,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { /* network device open function */ static int sh_eth_open(struct net_device *ndev) { - int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; 
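/* Editor's sketch (illustration only): the reordering done in
 * sh_eth_ring_format() above.  The RX descriptor is written only after the
 * skb's DMA mapping has succeeded, so a mapping failure never leaves a
 * half-initialised descriptor owned by the hardware, and the ring-wrap bit
 * is set only if at least one descriptor was actually filled.  The
 * descriptor layout and the EX_RD_* flags are simplified stand-ins.
 */
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

struct example_rxdesc {
        __le32 status;
        __le32 len;
        __le32 addr;
};

#define EX_RD_RACT BIT(31)      /* hypothetical "owned by HW" flag */
#define EX_RD_RDLE BIT(30)      /* hypothetical ring-wrap flag     */

static int example_fill_rx_ring(struct device *dev,
                                struct example_rxdesc *ring,
                                struct sk_buff **skbs,
                                int entries, unsigned int buf_len)
{
        struct example_rxdesc *rxdesc = NULL;
        int i;

        for (i = 0; i < entries; i++) {
                dma_addr_t addr;

                if (!skbs[i])
                        break;

                addr = dma_map_single(dev, skbs[i]->data, buf_len,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, addr))
                        break;          /* descriptor left untouched */

                rxdesc = &ring[i];
                rxdesc->len = cpu_to_le32(buf_len << 16);
                rxdesc->addr = cpu_to_le32(addr);
                rxdesc->status = cpu_to_le32(EX_RD_RACT);
        }

        /* Mark the last initialised entry as wrapping the ring */
        if (rxdesc)
                rxdesc->status |= cpu_to_le32(EX_RD_RDLE);

        return i;
}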
pm_runtime_get_sync(&mdp->pdev->dev); @@ -2274,6 +2259,8 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; + netif_start_queue(ndev); + mdp->is_opened = 1; return ret; @@ -2317,6 +2304,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) /* device init */ sh_eth_dev_init(ndev, true); + + netif_start_queue(ndev); } /* Packet transmit function */ @@ -2922,8 +2911,6 @@ static const u16 *sh_eth_get_register_offset(int register_type) case SH_ETH_REG_FAST_SH3_SH2: reg_offset = sh_eth_offset_fast_sh3_sh2; break; - default: - break; } return reg_offset; @@ -3003,12 +2990,12 @@ static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) static int sh_eth_drv_probe(struct platform_device *pdev) { - int ret, devno = 0; struct resource *res; - struct net_device *ndev = NULL; - struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); const struct platform_device_id *id = platform_get_device_id(pdev); + struct sh_eth_private *mdp; + struct net_device *ndev; + int ret, devno; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -3061,15 +3048,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->ether_link_active_low = pd->ether_link_active_low; /* set cpu data */ - if (id) { + if (id) mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; - } else { - const struct of_device_id *match; + else + mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); - match = of_match_device(of_match_ptr(sh_eth_match_table), - &pdev->dev); - mdp->cd = (struct sh_eth_cpu_data *)match->data; - } mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); if (!mdp->reg_offset) { dev_err(&pdev->dev, "Unknown register type (%d)\n", diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile index f85fb12f36f1..faa36acee223 100644 --- a/drivers/net/ethernet/rocker/Makefile +++ b/drivers/net/ethernet/rocker/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_ROCKER) += rocker.o +rocker-y := rocker_main.o rocker_tlv.o rocker_ofdpa.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c deleted file mode 100644 index 166a7fc87e2f..000000000000 --- a/drivers/net/ethernet/rocker/rocker.c +++ /dev/null @@ -1,5495 +0,0 @@ -/* - * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> - * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/spinlock.h> -#include <linux/hashtable.h> -#include <linux/crc32.h> -#include <linux/sort.h> -#include <linux/random.h> -#include <linux/netdevice.h> -#include <linux/inetdevice.h> -#include <linux/skbuff.h> -#include <linux/socket.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/if_bridge.h> -#include <linux/bitops.h> -#include <linux/ctype.h> -#include <net/switchdev.h> -#include <net/rtnetlink.h> -#include <net/ip_fib.h> -#include <net/netevent.h> -#include <net/arp.h> -#include <linux/io-64-nonatomic-lo-hi.h> -#include <generated/utsrelease.h> - -#include "rocker.h" - -static const char rocker_driver_name[] = "rocker"; - -static const struct pci_device_id rocker_pci_id_table[] = { - {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, - {0, } -}; - -struct rocker_flow_tbl_key { - u32 priority; - enum rocker_of_dpa_table_id tbl_id; - union { - struct { - u32 in_pport; - u32 in_pport_mask; - enum rocker_of_dpa_table_id goto_tbl; - } ig_port; - struct { - u32 in_pport; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool untagged; - __be16 new_vlan_id; - } vlan; - struct { - u32 in_pport; - u32 in_pport_mask; - __be16 eth_type; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool copy_to_cpu; - } term_mac; - struct { - __be16 eth_type; - __be32 dst4; - __be32 dst4_mask; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - } ucast_routing; - struct { - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - int has_eth_dst; - int has_eth_dst_mask; - __be16 vlan_id; - u32 tunnel_id; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - bool copy_to_cpu; - } bridge; - struct { - u32 in_pport; - u32 in_pport_mask; - u8 eth_src[ETH_ALEN]; - u8 eth_src_mask[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 eth_type; - __be16 vlan_id; - __be16 vlan_id_mask; - u8 ip_proto; - u8 ip_proto_mask; - u8 ip_tos; - u8 ip_tos_mask; - u32 group_id; - } acl; - }; -}; - -struct rocker_flow_tbl_entry { - struct hlist_node entry; - u32 cmd; - u64 cookie; - struct rocker_flow_tbl_key key; - size_t key_len; - u32 key_crc32; /* key */ -}; - -struct rocker_group_tbl_entry { - struct hlist_node entry; - u32 cmd; - u32 group_id; /* key */ - u16 group_count; - u32 *group_ids; - union { - struct { - u8 pop_vlan; - } l2_interface; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - u32 group_id; - } l2_rewrite; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - bool ttl_check; - u32 group_id; - } l3_unicast; - }; -}; - -struct rocker_fdb_tbl_entry { - struct hlist_node entry; - u32 key_crc32; /* key */ - bool learned; - unsigned long touched; - struct rocker_fdb_tbl_key { - struct rocker_port *rocker_port; - u8 addr[ETH_ALEN]; - __be16 vlan_id; - } key; -}; - -struct rocker_internal_vlan_tbl_entry { - struct hlist_node entry; - int ifindex; /* key */ - u32 ref_count; - __be16 vlan_id; -}; - -struct rocker_neigh_tbl_entry { - struct hlist_node entry; - __be32 ip_addr; /* key */ - struct net_device *dev; - u32 ref_count; - u32 index; - u8 eth_dst[ETH_ALEN]; - bool ttl_check; -}; - -struct rocker_desc_info { - char *data; /* mapped */ - size_t 
data_size; - size_t tlv_size; - struct rocker_desc *desc; - dma_addr_t mapaddr; -}; - -struct rocker_dma_ring_info { - size_t size; - u32 head; - u32 tail; - struct rocker_desc *desc; /* mapped */ - dma_addr_t mapaddr; - struct rocker_desc_info *desc_info; - unsigned int type; -}; - -struct rocker; - -enum { - ROCKER_CTRL_LINK_LOCAL_MCAST, - ROCKER_CTRL_LOCAL_ARP, - ROCKER_CTRL_IPV4_MCAST, - ROCKER_CTRL_IPV6_MCAST, - ROCKER_CTRL_DFLT_BRIDGING, - ROCKER_CTRL_DFLT_OVS, - ROCKER_CTRL_MAX, -}; - -#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 -#define ROCKER_N_INTERNAL_VLANS 255 -#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) -#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) - -struct rocker_port { - struct net_device *dev; - struct net_device *bridge_dev; - struct rocker *rocker; - unsigned int port_number; - u32 pport; - __be16 internal_vlan_id; - int stp_state; - u32 brport_flags; - unsigned long ageing_time; - bool ctrls[ROCKER_CTRL_MAX]; - unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; - struct napi_struct napi_tx; - struct napi_struct napi_rx; - struct rocker_dma_ring_info tx_ring; - struct rocker_dma_ring_info rx_ring; -}; - -struct rocker { - struct pci_dev *pdev; - u8 __iomem *hw_addr; - struct msix_entry *msix_entries; - unsigned int port_count; - struct rocker_port **ports; - struct { - u64 id; - } hw; - spinlock_t cmd_ring_lock; /* for cmd ring accesses */ - struct rocker_dma_ring_info cmd_ring; - struct rocker_dma_ring_info event_ring; - DECLARE_HASHTABLE(flow_tbl, 16); - spinlock_t flow_tbl_lock; /* for flow tbl accesses */ - u64 flow_tbl_next_cookie; - DECLARE_HASHTABLE(group_tbl, 16); - spinlock_t group_tbl_lock; /* for group tbl accesses */ - struct timer_list fdb_cleanup_timer; - DECLARE_HASHTABLE(fdb_tbl, 16); - spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ - unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; - DECLARE_HASHTABLE(internal_vlan_tbl, 8); - spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ - DECLARE_HASHTABLE(neigh_tbl, 16); - spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ - u32 neigh_tbl_next_index; -}; - -static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; -static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; -static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - -/* Rocker priority levels for flow table entries. Higher - * priority match takes precedence over lower priority match. 
- */ - -enum { - ROCKER_PRIORITY_UNKNOWN = 0, - ROCKER_PRIORITY_IG_PORT = 1, - ROCKER_PRIORITY_VLAN = 1, - ROCKER_PRIORITY_TERM_MAC_UCAST = 0, - ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_VLAN = 3, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_TENANT = 3, - ROCKER_PRIORITY_ACL_CTRL = 3, - ROCKER_PRIORITY_ACL_NORMAL = 2, - ROCKER_PRIORITY_ACL_DFLT = 1, -}; - -static bool rocker_vlan_id_is_internal(__be16 vlan_id) -{ - u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; - u16 end = 0xffe; - u16 _vlan_id = ntohs(vlan_id); - - return (_vlan_id >= start && _vlan_id <= end); -} - -static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port, - u16 vid, bool *pop_vlan) -{ - __be16 vlan_id; - - if (pop_vlan) - *pop_vlan = false; - vlan_id = htons(vid); - if (!vlan_id) { - vlan_id = rocker_port->internal_vlan_id; - if (pop_vlan) - *pop_vlan = true; - } - - return vlan_id; -} - -static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, - __be16 vlan_id) -{ - if (rocker_vlan_id_is_internal(vlan_id)) - return 0; - - return ntohs(vlan_id); -} - -static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_bridge_master(rocker_port->bridge_dev); -} - -static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_ovs_master(rocker_port->bridge_dev); -} - -#define ROCKER_OP_FLAG_REMOVE BIT(0) -#define ROCKER_OP_FLAG_NOWAIT BIT(1) -#define ROCKER_OP_FLAG_LEARNED BIT(2) -#define ROCKER_OP_FLAG_REFRESH BIT(3) - -static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) -{ - struct switchdev_trans_item *elem = NULL; - gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? - GFP_ATOMIC : GFP_KERNEL; - - /* If in transaction prepare phase, allocate the memory - * and enqueue it on a transaction. If in transaction - * commit phase, dequeue the memory from the transaction - * rather than re-allocating the memory. The idea is the - * driver code paths for prepare and commit are identical - * so the memory allocated in the prepare phase is the - * memory used in the commit phase. - */ - - if (!trans) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - } else if (switchdev_trans_ph_prepare(trans)) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - if (!elem) - return NULL; - switchdev_trans_item_enqueue(trans, elem, kfree, elem); - } else { - elem = switchdev_trans_item_dequeue(trans); - } - - return elem ? elem + 1 : NULL; -} - -static void *rocker_port_kzalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) -{ - return __rocker_port_mem_alloc(rocker_port, trans, flags, size); -} - -static void *rocker_port_kcalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t n, size_t size) -{ - return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size); -} - -static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem) -{ - struct switchdev_trans_item *elem; - - /* Frees are ignored if in transaction prepare phase. The - * memory remains on the per-port list until freed in the - * commit phase. 
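/* A minimal userspace model of the two-phase allocation scheme used by
 * rocker_port_kzalloc()/rocker_port_kfree() above, assuming only what the
 * comments describe: allocations made during the switchdev prepare phase are
 * queued, and the commit phase dequeues them in the same order instead of
 * allocating again.  The names trans_queue/trans_alloc are illustrative, not
 * driver symbols.
 */
#include <stdbool.h>
#include <stdlib.h>

struct trans_item {
	struct trans_item *next;
};

struct trans_queue {
	bool prepare;			/* true: prepare phase, false: commit */
	struct trans_item *head, *tail;
};

static void *trans_alloc(struct trans_queue *q, size_t size)
{
	struct trans_item *item;

	if (q->prepare) {
		item = calloc(1, sizeof(*item) + size);
		if (!item)
			return NULL;
		if (q->tail)		/* enqueue for the commit pass */
			q->tail->next = item;
		else
			q->head = item;
		q->tail = item;
	} else {
		item = q->head;		/* reuse what prepare allocated */
		if (!item)
			return NULL;
		q->head = item->next;
	}
	return item + 1;		/* caller's memory starts past the link */
}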
- */ - - if (switchdev_trans_ph_prepare(trans)) - return; - - elem = (struct switchdev_trans_item *) mem - 1; - kfree(elem); -} - -struct rocker_wait { - wait_queue_head_t wait; - bool done; - bool nowait; -}; - -static void rocker_wait_reset(struct rocker_wait *wait) -{ - wait->done = false; - wait->nowait = false; -} - -static void rocker_wait_init(struct rocker_wait *wait) -{ - init_waitqueue_head(&wait->wait); - rocker_wait_reset(wait); -} - -static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags) -{ - struct rocker_wait *wait; - - wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait)); - if (!wait) - return NULL; - rocker_wait_init(wait); - return wait; -} - -static void rocker_wait_destroy(struct switchdev_trans *trans, - struct rocker_wait *wait) -{ - rocker_port_kfree(trans, wait); -} - -static bool rocker_wait_event_timeout(struct rocker_wait *wait, - unsigned long timeout) -{ - wait_event_timeout(wait->wait, wait->done, HZ / 10); - if (!wait->done) - return false; - return true; -} - -static void rocker_wait_wake_up(struct rocker_wait *wait) -{ - wait->done = true; - wake_up(&wait->wait); -} - -static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) -{ - return rocker->msix_entries[vector].vector; -} - -static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_TX(rocker_port->port_number)); -} - -static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_RX(rocker_port->port_number)); -} - -#define rocker_write32(rocker, reg, val) \ - writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read32(rocker, reg) \ - readl((rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_write64(rocker, reg, val) \ - writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read64(rocker, reg) \ - readq((rocker)->hw_addr + (ROCKER_ ## reg)) - -/***************************** - * HW basic testing functions - *****************************/ - -static int rocker_reg_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - u64 test_reg; - u64 rnd; - - rnd = prandom_u32(); - rnd >>= 1; - rocker_write32(rocker, TEST_REG, rnd); - test_reg = rocker_read32(rocker, TEST_REG); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", - test_reg, rnd * 2); - return -EIO; - } - - rnd = prandom_u32(); - rnd <<= 31; - rnd |= prandom_u32(); - rocker_write64(rocker, TEST_REG64, rnd); - test_reg = rocker_read64(rocker, TEST_REG64); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", - test_reg, rnd * 2); - return -EIO; - } - - return 0; -} - -static int rocker_dma_test_one(const struct rocker *rocker, - struct rocker_wait *wait, u32 test_type, - dma_addr_t dma_handle, const unsigned char *buf, - const unsigned char *expect, size_t size) -{ - const struct pci_dev *pdev = rocker->pdev; - int i; - - rocker_wait_reset(wait); - rocker_write32(rocker, TEST_DMA_CTRL, test_type); - - if (!rocker_wait_event_timeout(wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - return -EIO; - } - - for (i = 0; i < size; i++) { - if (buf[i] != expect[i]) { - dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", - buf[i], i, expect[i]); - return 
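/* The register and DMA tests above all follow the same handshake: arm a
 * rocker_wait, poke the device, and let the test MSI-X vector's handler set
 * wait->done.  A condensed sketch of that sequence using the helpers defined
 * in this file (rocker_test_one_irq is an illustrative name and error
 * handling is trimmed):
 */
static int rocker_test_one_irq(const struct rocker *rocker)
{
	struct rocker_wait wait;
	int irq = rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST);
	int err;

	err = request_irq(irq, rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err)
		return err;

	rocker_wait_init(&wait);			/* done = false */
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	/* rocker_test_irq_handler() runs on that vector and wakes us */
	err = rocker_wait_event_timeout(&wait, HZ / 10) ? 0 : -EIO;

	free_irq(irq, &wait);
	return err;
}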
-EIO; - } - } - return 0; -} - -#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) -#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 - -static int rocker_dma_test_offset(const struct rocker *rocker, - struct rocker_wait *wait, int offset) -{ - struct pci_dev *pdev = rocker->pdev; - unsigned char *alloc; - unsigned char *buf; - unsigned char *expect; - dma_addr_t dma_handle; - int i; - int err; - - alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, - GFP_KERNEL | GFP_DMA); - if (!alloc) - return -ENOMEM; - buf = alloc + offset; - expect = buf + ROCKER_TEST_DMA_BUF_SIZE; - - dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, dma_handle)) { - err = -EIO; - goto free_alloc; - } - - rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); - rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); - - memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); - for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) - expect[i] = ~buf[i]; - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - -unmap: - pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); -free_alloc: - kfree(alloc); - - return err; -} - -static int rocker_dma_test(const struct rocker *rocker, - struct rocker_wait *wait) -{ - int i; - int err; - - for (i = 0; i < 8; i++) { - err = rocker_dma_test_offset(rocker, wait, i); - if (err) - return err; - } - return 0; -} - -static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) -{ - struct rocker_wait *wait = dev_id; - - rocker_wait_wake_up(wait); - - return IRQ_HANDLED; -} - -static int rocker_basic_hw_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_wait wait; - int err; - - err = rocker_reg_test(rocker); - if (err) { - dev_err(&pdev->dev, "reg test failed\n"); - return err; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), - rocker_test_irq_handler, 0, - rocker_driver_name, &wait); - if (err) { - dev_err(&pdev->dev, "cannot assign test irq\n"); - return err; - } - - rocker_wait_init(&wait); - rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); - - if (!rocker_wait_event_timeout(&wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - err = -EIO; - goto free_irq; - } - - err = rocker_dma_test(rocker, &wait); - if (err) - dev_err(&pdev->dev, "dma test failed\n"); - -free_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); - return err; -} - -/****** - * TLV - ******/ - -#define ROCKER_TLV_ALIGNTO 8U -#define ROCKER_TLV_ALIGN(len) \ - (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) -#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) - -/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ - * | Header | Pad | Payload | Pad | - * | (struct rocker_tlv) | ing | | ing | - * +-----------------------------+- - -+- - - - - - - 
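/* Worked example of the sizing macros above for a 6-byte (ETH_ALEN) payload,
 * assuming struct rocker_tlv itself is no larger than the 8-byte
 * ROCKER_TLV_ALIGNTO, so ROCKER_TLV_HDRLEN works out to 8:
 *
 *   attr size  = ROCKER_TLV_HDRLEN + 6 = 8 + 6 = 14   (stored in tlv->len)
 *   total size = ROCKER_TLV_ALIGN(14)  = 16           (space consumed)
 *   padding    = 16 - 14               = 2            (zeroed by rocker_tlv_put())
 *
 * which is why rocker_tlv_next() always lands on an 8-byte boundary.
 */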
- - - - - - - -+- - -+ - * <--------------------------- tlv->len --------------------------> - */ - -static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, - int *remaining) -{ - int totlen = ROCKER_TLV_ALIGN(tlv->len); - - *remaining -= totlen; - return (struct rocker_tlv *) ((char *) tlv + totlen); -} - -static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) -{ - return remaining >= (int) ROCKER_TLV_HDRLEN && - tlv->len >= ROCKER_TLV_HDRLEN && - tlv->len <= remaining; -} - -#define rocker_tlv_for_each(pos, head, len, rem) \ - for (pos = head, rem = len; \ - rocker_tlv_ok(pos, rem); \ - pos = rocker_tlv_next(pos, &(rem))) - -#define rocker_tlv_for_each_nested(pos, tlv, rem) \ - rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ - rocker_tlv_len(tlv), rem) - -static int rocker_tlv_attr_size(int payload) -{ - return ROCKER_TLV_HDRLEN + payload; -} - -static int rocker_tlv_total_size(int payload) -{ - return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); -} - -static int rocker_tlv_padlen(int payload) -{ - return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); -} - -static int rocker_tlv_type(const struct rocker_tlv *tlv) -{ - return tlv->type; -} - -static void *rocker_tlv_data(const struct rocker_tlv *tlv) -{ - return (char *) tlv + ROCKER_TLV_HDRLEN; -} - -static int rocker_tlv_len(const struct rocker_tlv *tlv) -{ - return tlv->len - ROCKER_TLV_HDRLEN; -} - -static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) -{ - return *(u8 *) rocker_tlv_data(tlv); -} - -static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) -{ - return *(u16 *) rocker_tlv_data(tlv); -} - -static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) -{ - return *(__be16 *) rocker_tlv_data(tlv); -} - -static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) -{ - return *(u32 *) rocker_tlv_data(tlv); -} - -static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) -{ - return *(u64 *) rocker_tlv_data(tlv); -} - -static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, - const char *buf, int buf_len) -{ - const struct rocker_tlv *tlv; - const struct rocker_tlv *head = (const struct rocker_tlv *) buf; - int rem; - - memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); - - rocker_tlv_for_each(tlv, head, buf_len, rem) { - u32 type = rocker_tlv_type(tlv); - - if (type > 0 && type <= maxtype) - tb[type] = tlv; - } -} - -static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype, - const struct rocker_tlv *tlv) -{ - rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), - rocker_tlv_len(tlv)); -} - -static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, - const struct rocker_desc_info *desc_info) -{ - rocker_tlv_parse(tb, maxtype, desc_info->data, - desc_info->desc->tlv_size); -} - -static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) -{ - return (struct rocker_tlv *) ((char *) desc_info->data + - desc_info->tlv_size); -} - -static int rocker_tlv_put(struct rocker_desc_info *desc_info, - int attrtype, int attrlen, const void *data) -{ - int tail_room = desc_info->data_size - desc_info->tlv_size; - int total_size = rocker_tlv_total_size(attrlen); - struct rocker_tlv *tlv; - - if (unlikely(tail_room < total_size)) - return -EMSGSIZE; - - tlv = rocker_tlv_start(desc_info); - desc_info->tlv_size += total_size; - tlv->type = attrtype; - tlv->len = rocker_tlv_attr_size(attrlen); - memcpy(rocker_tlv_data(tlv), data, attrlen); - memset((char *) tlv + tlv->len, 0, 
rocker_tlv_padlen(attrlen)); - return 0; -} - -static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, - int attrtype, u8 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); -} - -static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, - int attrtype, u16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); -} - -static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, - int attrtype, __be16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); -} - -static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, - int attrtype, u32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); -} - -static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, - int attrtype, __be32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); -} - -static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, - int attrtype, u64 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); -} - -static struct rocker_tlv * -rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) -{ - struct rocker_tlv *start = rocker_tlv_start(desc_info); - - if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) - return NULL; - - return start; -} - -static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, - struct rocker_tlv *start) -{ - start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; -} - -static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, - const struct rocker_tlv *start) -{ - desc_info->tlv_size = (const char *) start - desc_info->data; -} - -/****************************************** - * DMA rings and descriptors manipulations - ******************************************/ - -static u32 __pos_inc(u32 pos, size_t limit) -{ - return ++pos == limit ? 0 : pos; -} - -static int rocker_desc_err(const struct rocker_desc_info *desc_info) -{ - int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; - - switch (err) { - case ROCKER_OK: - return 0; - case -ROCKER_ENOENT: - return -ENOENT; - case -ROCKER_ENXIO: - return -ENXIO; - case -ROCKER_ENOMEM: - return -ENOMEM; - case -ROCKER_EEXIST: - return -EEXIST; - case -ROCKER_EINVAL: - return -EINVAL; - case -ROCKER_EMSGSIZE: - return -EMSGSIZE; - case -ROCKER_ENOTSUP: - return -EOPNOTSUPP; - case -ROCKER_ENOBUFS: - return -ENOBUFS; - } - - return -EINVAL; -} - -static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; -} - -static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) -{ - u32 comp_err = desc_info->desc->comp_err; - - return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
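/* Typical use of the TLV encoder helpers above: a command descriptor opens a
 * nested ROCKER_TLV_CMD_INFO attribute, emits attributes into it, then
 * patches the nest length (or cancels the nest on overflow).  A condensed
 * sketch mirroring the "port settings" prep callbacks later in this file;
 * example_prep is an illustrative name, not a driver symbol.
 */
static int example_prep(struct rocker_desc_info *desc_info, u32 pport)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;

	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       pport)) {
		/* roll the descriptor back to before the nest */
		rocker_tlv_nest_cancel(desc_info, cmd_info);
		return -EMSGSIZE;
	}

	rocker_tlv_nest_end(desc_info, cmd_info);	/* fix up nest length */
	return 0;
}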
true : false; -} - -static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) -{ - return (void *)(uintptr_t)desc_info->desc->cookie; -} - -static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, - void *ptr) -{ - desc_info->desc->cookie = (uintptr_t) ptr; -} - -static struct rocker_desc_info * -rocker_desc_head_get(const struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - u32 head = __pos_inc(info->head, info->size); - - desc_info = &info->desc_info[info->head]; - if (head == info->tail) - return NULL; /* ring full */ - desc_info->tlv_size = 0; - return desc_info; -} - -static void rocker_desc_commit(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->buf_size = desc_info->data_size; - desc_info->desc->tlv_size = desc_info->tlv_size; -} - -static void rocker_desc_head_set(const struct rocker *rocker, - struct rocker_dma_ring_info *info, - const struct rocker_desc_info *desc_info) -{ - u32 head = __pos_inc(info->head, info->size); - - BUG_ON(head == info->tail); - rocker_desc_commit(desc_info); - info->head = head; - rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); -} - -static struct rocker_desc_info * -rocker_desc_tail_get(struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - - if (info->tail == info->head) - return NULL; /* nothing to be done between head and tail */ - desc_info = &info->desc_info[info->tail]; - if (!rocker_desc_gen(desc_info)) - return NULL; /* gen bit not set, desc is not ready yet */ - info->tail = __pos_inc(info->tail, info->size); - desc_info->tlv_size = desc_info->desc->tlv_size; - return desc_info; -} - -static void rocker_dma_ring_credits_set(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - u32 credits) -{ - if (credits) - rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); -} - -static unsigned long rocker_dma_ring_size_fix(size_t size) -{ - return max(ROCKER_DMA_SIZE_MIN, - min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); -} - -static int rocker_dma_ring_create(const struct rocker *rocker, - unsigned int type, - size_t size, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(size != rocker_dma_ring_size_fix(size)); - info->size = size; - info->type = type; - info->head = 0; - info->tail = 0; - info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), - GFP_KERNEL); - if (!info->desc_info) - return -ENOMEM; - - info->desc = pci_alloc_consistent(rocker->pdev, - info->size * sizeof(*info->desc), - &info->mapaddr); - if (!info->desc) { - kfree(info->desc_info); - return -ENOMEM; - } - - for (i = 0; i < info->size; i++) - info->desc_info[i].desc = &info->desc[i]; - - rocker_write32(rocker, DMA_DESC_CTRL(info->type), - ROCKER_DMA_DESC_CTRL_RESET); - rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); - rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); - - return 0; -} - -static void rocker_dma_ring_destroy(const struct rocker *rocker, - const struct rocker_dma_ring_info *info) -{ - rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); - - pci_free_consistent(rocker->pdev, - info->size * sizeof(struct rocker_desc), - info->desc, info->mapaddr); - kfree(info->desc_info); -} - -static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(info->head || info->tail); - - /* When ring is consumer, we need to advance head for each desc. 
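/* A userspace model of the ring indexing above.  One slot is always left
 * unused so that "full" (next head == tail) and "empty" (head == tail) stay
 * distinguishable, and the hardware-owned generation bit is modeled as a
 * plain flag.  Illustrative only; the real code also writes the head and
 * credit registers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ring {
	size_t size;		/* number of descriptors */
	uint32_t head;		/* next slot handed to the device */
	uint32_t tail;		/* next slot reclaimed by software */
	bool *gen;		/* per-slot "completed by device" flag */
};

static uint32_t pos_inc(uint32_t pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int ring_post(struct ring *r)		/* cf. rocker_desc_head_get/_set */
{
	if (pos_inc(r->head, r->size) == r->tail)
		return -1;			/* full: head may not catch tail */
	r->head = pos_inc(r->head, r->size);
	return 0;
}

static int ring_reclaim(struct ring *r)		/* cf. rocker_desc_tail_get */
{
	if (r->tail == r->head || !r->gen[r->tail])
		return -1;			/* nothing completed yet */
	r->gen[r->tail] = false;		/* real code clears the gen bit separately */
	r->tail = pos_inc(r->tail, r->size);
	return 0;
}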
- * That tells hw that the desc is ready to be used by it. - */ - for (i = 0; i < info->size - 1; i++) - rocker_desc_head_set(rocker, info, &info->desc_info[i]); - rocker_desc_commit(&info->desc_info[i]); -} - -static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction, size_t buf_size) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - int err; - - for (i = 0; i < info->size; i++) { - struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - dma_addr_t dma_handle; - char *buf; - - buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); - if (!buf) { - err = -ENOMEM; - goto rollback; - } - - dma_handle = pci_map_single(pdev, buf, buf_size, direction); - if (pci_dma_mapping_error(pdev, dma_handle)) { - kfree(buf); - err = -EIO; - goto rollback; - } - - desc_info->data = buf; - desc_info->data_size = buf_size; - dma_unmap_addr_set(desc_info, mapaddr, dma_handle); - - desc->buf_addr = dma_handle; - desc->buf_size = buf_size; - } - return 0; - -rollback: - for (i--; i >= 0; i--) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } - return err; -} - -static void rocker_dma_ring_bufs_free(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - - for (i = 0; i < info->size; i++) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - - desc->buf_addr = 0; - desc->buf_size = 0; - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } -} - -static int rocker_dma_rings_init(struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, - ROCKER_DMA_CMD_DEFAULT_SIZE, - &rocker->cmd_ring); - if (err) { - dev_err(&pdev->dev, "failed to create command dma ring\n"); - return err; - } - - spin_lock_init(&rocker->cmd_ring_lock); - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); - goto err_dma_cmd_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, - ROCKER_DMA_EVENT_DEFAULT_SIZE, - &rocker->event_ring); - if (err) { - dev_err(&pdev->dev, "failed to create event dma ring\n"); - goto err_dma_event_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, - PCI_DMA_FROMDEVICE, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); - goto err_dma_event_ring_bufs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); - return 0; - -err_dma_event_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->event_ring); -err_dma_event_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); - return err; -} - -static void rocker_dma_rings_fini(struct rocker *rocker) -{ - rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker->event_ring); - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, 
&rocker->cmd_ring); -} - -static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - struct sk_buff *skb, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - - dma_handle = pci_map_single(pdev, skb->data, buf_len, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, dma_handle)) - return -EIO; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) - goto tlv_put_failure; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) - goto tlv_put_failure; - return 0; - -tlv_put_failure: - pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); - desc_info->tlv_size = 0; - return -EMSGSIZE; -} - -static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) -{ - return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; -} - -static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - struct net_device *dev = rocker_port->dev; - struct sk_buff *skb; - size_t buf_len = rocker_port_rx_buf_len(rocker_port); - int err; - - /* Ensure that hw will see tlv_size zero in case of an error. - * That tells hw to use another descriptor. - */ - rocker_desc_cookie_ptr_set(desc_info, NULL); - desc_info->tlv_size = 0; - - skb = netdev_alloc_skb_ip_align(dev, buf_len); - if (!skb) - return -ENOMEM; - err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); - if (err) { - dev_kfree_skb_any(skb); - return err; - } - rocker_desc_cookie_ptr_set(desc_info, skb); - return 0; -} - -static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, - const struct rocker_tlv **attrs) -{ - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - size_t len; - - if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || - !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) - return; - dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); - len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); - pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); -} - -static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - - if (!skb) - return; - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - dev_kfree_skb_any(skb); -} - -static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - int err; - - for (i = 0; i < rx_ring->size; i++) { - err = rocker_dma_rx_ring_skb_alloc(rocker_port, - &rx_ring->desc_info[i]); - if (err) - goto rollback; - } - return 0; - -rollback: - for (i--; i >= 0; i--) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); - return err; -} - -static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - - for (i = 0; i < rx_ring->size; i++) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); -} - -static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - int err; - - err = rocker_dma_ring_create(rocker, - 
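/* Worked example of rocker_port_rx_buf_len() above for the default MTU:
 * 1500 (mtu) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes
 * per receive descriptor, plus the small headroom that
 * netdev_alloc_skb_ip_align() reserves for IP alignment.
 */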
ROCKER_DMA_TX(rocker_port->port_number), - ROCKER_DMA_TX_DEFAULT_SIZE, - &rocker_port->tx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); - return err; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE, - ROCKER_DMA_TX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); - goto err_dma_tx_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, - ROCKER_DMA_RX(rocker_port->port_number), - ROCKER_DMA_RX_DEFAULT_SIZE, - &rocker_port->rx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); - goto err_dma_rx_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL, - ROCKER_DMA_RX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); - goto err_dma_rx_ring_bufs_alloc; - } - - err = rocker_dma_rx_ring_skbs_alloc(rocker_port); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); - goto err_dma_rx_ring_skbs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); - - return 0; - -err_dma_rx_ring_skbs_alloc: - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_rx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); -err_dma_rx_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); -err_dma_tx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); - return err; -} - -static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - - rocker_dma_rx_ring_skbs_free(rocker_port); - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); -} - -static void rocker_port_set_enable(const struct rocker_port *rocker_port, - bool enable) -{ - u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); - - if (enable) - val |= 1ULL << rocker_port->pport; - else - val &= ~(1ULL << rocker_port->pport); - rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); -} - -/******************************** - * Interrupt handler and helpers - ********************************/ - -static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - u32 credits = 0; - - spin_lock(&rocker->cmd_ring_lock); - while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { - wait = rocker_desc_cookie_ptr_get(desc_info); - if (wait->nowait) { - rocker_desc_gen_clear(desc_info); - rocker_wait_destroy(NULL, wait); - } else { - rocker_wait_wake_up(wait); - } - credits++; - } - spin_unlock(&rocker->cmd_ring_lock); - rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); - - return IRQ_HANDLED; -} - -static void rocker_port_link_up(const struct rocker_port *rocker_port) -{ - netif_carrier_on(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is up\n"); -} - -static void rocker_port_link_down(const struct rocker_port *rocker_port) -{ - netif_carrier_off(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is down\n"); -} - -static int rocker_event_link_change(const struct rocker *rocker, - const struct 
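/* rocker_port_set_enable() above treats ROCKER_PORT_PHYS_ENABLE as a plain
 * per-pport bitmap.  For example, with pports 1 and 3 enabled the register
 * reads 0x0a; enabling pport 2 writes back 0x0a | (1ULL << 2) = 0x0e, and
 * disabling it masks that bit out again.
 */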
rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; - unsigned int port_number; - bool link_up; - struct rocker_port *rocker_port; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || - !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; - link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - if (netif_carrier_ok(rocker_port->dev) != link_up) { - if (link_up) - rocker_port_link_up(rocker_port); - else - rocker_port_link_down(rocker_port); - } - - return 0; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags); - -static int rocker_event_mac_vlan_seen(const struct rocker *rocker, - const struct rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; - unsigned int port_number; - struct rocker_port *rocker_port; - const unsigned char *addr; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; - __be16 vlan_id; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; - addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); - vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - return 0; - - return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags); -} - -static int rocker_event_process(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; - const struct rocker_tlv *info; - u16 type; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); - if (!attrs[ROCKER_TLV_EVENT_TYPE] || - !attrs[ROCKER_TLV_EVENT_INFO]) - return -EIO; - - type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); - info = attrs[ROCKER_TLV_EVENT_INFO]; - - switch (type) { - case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: - return rocker_event_link_change(rocker, info); - case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: - return rocker_event_mac_vlan_seen(rocker, info); - } - - return -EOPNOTSUPP; -} - -static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct pci_dev *pdev = rocker->pdev; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - dev_err(&pdev->dev, "event desc received with err %d\n", - err); - } else { - err = rocker_event_process(rocker, desc_info); - if (err) - dev_err(&pdev->dev, "event processing failed with err %d\n", - err); - } - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); - credits++; - } - rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); - - return IRQ_HANDLED; -} - -static irqreturn_t 
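/* Note on the numbering used by the event handlers above: pports reported by
 * the device are 1-based, while rocker->ports[] is 0-based, so a LINK_CHANGED
 * or MAC_VLAN event for pport 1 is delivered to rocker->ports[0] (hence the
 * "- 1" and the port_count bounds check).
 */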
rocker_tx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_tx); - return IRQ_HANDLED; -} - -static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_rx); - return IRQ_HANDLED; -} - -/******************** - * Command interface - ********************/ - -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv); - -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv); - -static int rocker_cmd_exec(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - rocker_cmd_prep_cb_t prepare, void *prepare_priv, - rocker_cmd_proc_cb_t process, void *process_priv) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT); - unsigned long lock_flags; - int err; - - wait = rocker_wait_create(rocker_port, trans, flags); - if (!wait) - return -ENOMEM; - wait->nowait = nowait; - - spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); - - desc_info = rocker_desc_head_get(&rocker->cmd_ring); - if (!desc_info) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - err = -EAGAIN; - goto out; - } - - err = prepare(rocker_port, desc_info, prepare_priv); - if (err) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - goto out; - } - - rocker_desc_cookie_ptr_set(desc_info, wait); - - if (!switchdev_trans_ph_prepare(trans)) - rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); - - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - - if (nowait) - return 0; - - if (!switchdev_trans_ph_prepare(trans)) - if (!rocker_wait_event_timeout(wait, HZ / 10)) - return -EIO; - - err = rocker_desc_err(desc_info); - if (err) - return err; - - if (process) - err = process(rocker_port, desc_info, process_priv); - - rocker_desc_gen_clear(desc_info); -out: - rocker_wait_destroy(trans, wait); - return err; -} - -static int -rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - u32 speed; - u8 duplex; - u8 autoneg; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) - return -EIO; - - speed = 
rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); - duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); - autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = SUPPORTED_TP; - ecmd->phy_address = 0xff; - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; - ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; - - return 0; -} - -static int -rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - unsigned char *macaddr = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attr; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; - if (!attr) - return -EIO; - - if (rocker_tlv_len(attr) != ETH_ALEN) - return -EINVAL; - - ether_addr_copy(macaddr, rocker_tlv_data(attr)); - return 0; -} - -struct port_name { - char *buf; - size_t len; -}; - -static int -rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - struct port_name *name = priv; - const struct rocker_tlv *attr; - size_t i, j, len; - const char *str; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; - if (!attr) - return -EIO; - - len = min_t(size_t, rocker_tlv_len(attr), name->len); - str = rocker_tlv_data(attr); - - /* make sure name only contains alphanumeric characters */ - for (i = j = 0; i < len; ++i) { - if (isalnum(str[i])) { - name->buf[j] = str[i]; - j++; - } - } - - if (j == 0) - return -EIO; - - name->buf[j] = '\0'; - - return 0; -} - -static int -rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, - ethtool_cmd_speed(ecmd))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, - ecmd->duplex)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, - ecmd->autoneg)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const unsigned char *macaddr = priv; - 
struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, - ETH_ALEN, macaddr)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - int mtu = *(int *)priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, - mtu)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, - !!(rocker_port->brport_flags & BR_LEARNING))) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_ethtool_proc, - ecmd); -} - -static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_macaddr_proc, - macaddr); -} - -static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_ethtool_prep, - ecmd, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_macaddr_prep, - macaddr, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, - int mtu) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_mtu_prep, - &mtu, NULL, NULL); -} - -static int rocker_port_set_learning(struct rocker_port *rocker_port, - struct switchdev_trans *trans) -{ - return rocker_cmd_exec(rocker_port, trans, 0, - rocker_cmd_set_port_learning_prep, - NULL, NULL, NULL); -} - -static int -rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.ig_port.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.ig_port.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ig_port.goto_tbl)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.vlan.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.vlan.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.vlan.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.vlan.goto_tbl)) - return -EMSGSIZE; - if (entry->key.vlan.untagged && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, - entry->key.vlan.new_vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.term_mac.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.term_mac.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.term_mac.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.term_mac.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.term_mac.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.term_mac.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.term_mac.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.term_mac.goto_tbl)) - return -EMSGSIZE; - if (entry->key.term_mac.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.term_mac.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.ucast_routing.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, - entry->key.ucast_routing.dst4)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, - entry->key.ucast_routing.dst4_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ucast_routing.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.ucast_routing.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (entry->key.bridge.has_eth_dst && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.bridge.eth_dst)) - return -EMSGSIZE; - if (entry->key.bridge.has_eth_dst_mask && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 
- ETH_ALEN, entry->key.bridge.eth_dst_mask)) - return -EMSGSIZE; - if (entry->key.bridge.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.bridge.vlan_id)) - return -EMSGSIZE; - if (entry->key.bridge.tunnel_id && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, - entry->key.bridge.tunnel_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.bridge.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.bridge.group_id)) - return -EMSGSIZE; - if (entry->key.bridge.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.bridge.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.acl.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.acl.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->key.acl.eth_src)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_src_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.acl.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.acl.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.acl.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.acl.vlan_id_mask)) - return -EMSGSIZE; - - switch (ntohs(entry->key.acl.eth_type)) { - case ETH_P_IP: - case ETH_P_IPV6: - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, - entry->key.acl.ip_proto)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, - entry->key.acl.ip_proto_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, - entry->key.acl.ip_tos & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, - entry->key.acl.ip_tos_mask & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, - (entry->key.acl.ip_tos & 0xc0) >> 6)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_ECN_MASK, - (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) - return -EMSGSIZE; - break; - } - - if (entry->key.acl.group_id != ROCKER_GROUP_NONE && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.acl.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, - entry->key.tbl_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, 
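/* Worked example of the ip_tos split above: for ip_tos = 0xb8 the ACL entry
 * is emitted as ROCKER_TLV_OF_DPA_IP_DSCP = 0xb8 & 0x3f = 0x38 and
 * ROCKER_TLV_OF_DPA_IP_ECN = (0xb8 & 0xc0) >> 6 = 2, i.e. the low six bits
 * of the stored tos go into the DSCP attribute and the top two bits into the
 * ECN attribute.
 */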
ROCKER_TLV_OF_DPA_PRIORITY, - entry->key.priority)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - - switch (entry->key.tbl_id) { - case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: - err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_VLAN: - err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: - err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: - err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_BRIDGING: - err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: - err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, - struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, - ROCKER_GROUP_PORT_GET(entry->group_id))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, - entry->l2_interface.pop_vlan)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l2_rewrite.group_id)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l2_rewrite.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l2_rewrite.eth_dst)) - return -EMSGSIZE; - if (entry->l2_rewrite.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l2_rewrite.vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - int i; - struct rocker_tlv *group_ids; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, - entry->group_count)) - return -EMSGSIZE; - - group_ids = rocker_tlv_nest_start(desc_info, - ROCKER_TLV_OF_DPA_GROUP_IDS); - if (!group_ids) - return -EMSGSIZE; - - for (i = 0; i < entry->group_count; i++) - /* Note TLV array is 1-based */ - if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, group_ids); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, - const struct 
rocker_group_tbl_entry *entry) -{ - if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l3_unicast.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l3_unicast.eth_dst)) - return -EMSGSIZE; - if (entry->l3_unicast.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l3_unicast.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, - entry->l3_unicast.ttl_check)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l3_unicast.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: - err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: - err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: - err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -/*************************************************** - * Flow, group, FDB, internal VLAN and neigh tables - ***************************************************/ - -static int rocker_init_tbls(struct rocker *rocker) -{ - hash_init(rocker->flow_tbl); - spin_lock_init(&rocker->flow_tbl_lock); - - hash_init(rocker->group_tbl); - spin_lock_init(&rocker->group_tbl_lock); - - hash_init(rocker->fdb_tbl); - spin_lock_init(&rocker->fdb_tbl_lock); - - hash_init(rocker->internal_vlan_tbl); - spin_lock_init(&rocker->internal_vlan_tbl_lock); - - hash_init(rocker->neigh_tbl); - spin_lock_init(&rocker->neigh_tbl_lock); - - return 0; -} - -static void rocker_free_tbls(struct rocker *rocker) -{ - unsigned long flags; - struct rocker_flow_tbl_entry *flow_entry; - struct rocker_group_tbl_entry *group_entry; - struct rocker_fdb_tbl_entry *fdb_entry; - struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; - struct rocker_neigh_tbl_entry *neigh_entry; - struct hlist_node *tmp; - 
int bkt; - - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) - hash_del(&flow_entry->entry); - spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - - spin_lock_irqsave(&rocker->group_tbl_lock, flags); - hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) - hash_del(&group_entry->entry); - spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) - hash_del(&fdb_entry->entry); - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); - hash_for_each_safe(rocker->internal_vlan_tbl, bkt, - tmp, internal_vlan_entry, entry) - hash_del(&internal_vlan_entry->entry); - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); - - spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); - hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) - hash_del(&neigh_entry->entry); - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); -} - -static struct rocker_flow_tbl_entry * -rocker_flow_tbl_find(const struct rocker *rocker, - const struct rocker_flow_tbl_entry *match) -{ - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - - hash_for_each_possible(rocker->flow_tbl, found, - entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, key_len) == 0) - return found; - } - - return NULL; -} - -static int rocker_flow_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - unsigned long lock_flags; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - match->cookie = found->cookie; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_port_kfree(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; - } else { - found = match; - found->cookie = rocker->flow_tbl_next_cookie++; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_add, found, NULL, NULL); -} - -static int rocker_flow_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? 
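rocker_flow_tbl_add() above hashes the flow key with crc32 and, under the table spinlock, either replaces an existing entry (reusing its cookie and issuing a FLOW_MOD) or inserts a fresh one with the next cookie (FLOW_ADD). A sketch of that add-or-modify decision over an ordinary linked list; FNV-1a stands in for crc32 and all names are illustrative:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

enum flow_cmd { FLOW_ADD, FLOW_MOD };

/* Keys are assumed zero-initialised so padding bytes hash and compare
 * consistently (the driver kzalloc()s its entries for the same reason). */
struct flow_key { uint32_t in_port; uint16_t vlan_id; };

struct flow_entry {
	struct flow_entry *next;
	uint32_t key_hash;
	struct flow_key key;
	uint64_t cookie;
	enum flow_cmd cmd;
};

static uint32_t flow_key_hash(const struct flow_key *key)
{
	const uint8_t *p = (const uint8_t *)key;
	uint32_t h = 2166136261u;	/* FNV-1a, standing in for crc32 */

	for (size_t i = 0; i < sizeof(*key); i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

/* Insert or replace; the returned entry carries the command to send. */
static struct flow_entry *flow_tbl_add(struct flow_entry **head,
				       struct flow_entry *match,
				       uint64_t *next_cookie)
{
	struct flow_entry **pp, *found;

	match->key_hash = flow_key_hash(&match->key);
	for (pp = head; (found = *pp) != NULL; pp = &found->next) {
		if (found->key_hash != match->key_hash ||
		    memcmp(&found->key, &match->key, sizeof(match->key)))
			continue;
		match->cookie = found->cookie;	/* keep the hardware cookie */
		match->cmd = FLOW_MOD;
		match->next = found->next;
		*pp = match;
		free(found);
		return match;
	}
	match->cookie = (*next_cookie)++;	/* brand-new flow */
	match->cmd = FLOW_ADD;
	match->next = *head;
	*head = match;
	return match;
}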
match->key_len : sizeof(found->key); - unsigned long lock_flags; - int err = 0; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; - } - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - rocker_port_kfree(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_del, - found, NULL, NULL); - rocker_port_kfree(trans, found); - } - - return err; -} - -static int rocker_flow_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_flow_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_flow_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - enum rocker_of_dpa_table_id goto_tbl) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_IG_PORT; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_pport = in_pport; - entry->key.ig_port.in_pport_mask = in_pport_mask; - entry->key.ig_port.goto_tbl = goto_tbl; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, __be16 vlan_id, - __be16 vlan_id_mask, - enum rocker_of_dpa_table_id goto_tbl, - bool untagged, __be16 new_vlan_id) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_VLAN; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_pport = in_pport; - entry->key.vlan.vlan_id = vlan_id; - entry->key.vlan.vlan_id_mask = vlan_id_mask; - entry->key.vlan.goto_tbl = goto_tbl; - - entry->key.vlan.untagged = untagged; - entry->key.vlan.new_vlan_id = new_vlan_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 in_pport, u32 in_pport_mask, - __be16 eth_type, const u8 *eth_dst, - const u8 *eth_dst_mask, __be16 vlan_id, - __be16 vlan_id_mask, bool copy_to_cpu, - int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - if (is_multicast_ether_addr(eth_dst)) { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; - } else { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - } - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_pport = in_pport; - entry->key.term_mac.in_pport_mask = in_pport_mask; - entry->key.term_mac.eth_type = eth_type; - ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); - ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); - 
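rocker_flow_tbl_term_mac() above chooses both the entry priority and the goto-table from the destination MAC: multicast destinations continue to multicast routing, everything else to unicast routing. A stand-alone restatement of that classification; the numeric values here are placeholders, not the driver's constants:

#include <stdbool.h>
#include <stdint.h>

struct term_mac_choice {
	int priority;	/* illustrative values only */
	int goto_tbl;
};

/* Ethernet multicast: least-significant bit of the first octet is set. */
static bool mac_is_multicast(const uint8_t mac[6])
{
	return mac[0] & 0x01;
}

static struct term_mac_choice classify_term_mac(const uint8_t dst[6])
{
	if (mac_is_multicast(dst))
		return (struct term_mac_choice){ .priority = 2, .goto_tbl = 40 /* multicast routing */ };
	return (struct term_mac_choice){ .priority = 1, .goto_tbl = 30 /* unicast routing */ };
}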
entry->key.term_mac.vlan_id = vlan_id; - entry->key.term_mac.vlan_id_mask = vlan_id_mask; - entry->key.term_mac.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 vlan_id, u32 tunnel_id, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, bool copy_to_cpu) -{ - struct rocker_flow_tbl_entry *entry; - u32 priority; - bool vlan_bridging = !!vlan_id; - bool dflt = !eth_dst || (eth_dst && eth_dst_mask); - bool wild = false; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; - - if (eth_dst) { - entry->key.bridge.has_eth_dst = 1; - ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); - } - if (eth_dst_mask) { - entry->key.bridge.has_eth_dst_mask = 1; - ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); - if (!ether_addr_equal(eth_dst_mask, ff_mac)) - wild = true; - } - - priority = ROCKER_PRIORITY_UNKNOWN; - if (vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; - else if (vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; - else if (vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_VLAN; - else if (!vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; - else if (!vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; - else if (!vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_TENANT; - - entry->key.priority = priority; - entry->key.bridge.vlan_id = vlan_id; - entry->key.bridge.tunnel_id = tunnel_id; - entry->key.bridge.goto_tbl = goto_tbl; - entry->key.bridge.group_id = group_id; - entry->key.bridge.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be16 eth_type, __be32 dst, - __be32 dst_mask, u32 priority, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - entry->key.priority = priority; - entry->key.ucast_routing.eth_type = eth_type; - entry->key.ucast_routing.dst4 = dst; - entry->key.ucast_routing.dst4_mask = dst_mask; - entry->key.ucast_routing.goto_tbl = goto_tbl; - entry->key.ucast_routing.group_id = group_id; - entry->key_len = offsetof(struct rocker_flow_tbl_key, - ucast_routing.group_id); - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - const u8 *eth_src, const u8 *eth_src_mask, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 eth_type, __be16 vlan_id, - __be16 vlan_id_mask, u8 ip_proto, - u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, - u32 group_id) -{ - u32 priority; - struct rocker_flow_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - priority = ROCKER_PRIORITY_ACL_NORMAL; - if (eth_dst && eth_dst_mask) { - if (ether_addr_equal(eth_dst_mask, mcast_mac)) - 
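rocker_flow_tbl_ucast4_routing() above sets key_len to offsetof(struct rocker_flow_tbl_key, ucast_routing.group_id), so the find/add path compares only the match fields and ignores the group ID; re-pointing a route at a different nexthop group is then treated as a modify of the same flow rather than a new one. A small sketch of that offsetof trick with an illustrative key layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Match fields first, action last, so a prefix compare can ignore the
 * action. Entries are assumed zero-initialised so padding compares equal. */
struct route_key {
	uint32_t dst;
	uint32_t dst_mask;
	uint16_t eth_type;
	uint32_t group_id;	/* action: not part of the lookup identity */
};

static bool route_key_same_match(const struct route_key *a,
				 const struct route_key *b)
{
	/* Everything up to, but not including, group_id. */
	return memcmp(a, b, offsetof(struct route_key, group_id)) == 0;
}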
priority = ROCKER_PRIORITY_ACL_DFLT; - else if (is_link_local_ether_addr(eth_dst)) - priority = ROCKER_PRIORITY_ACL_CTRL; - } - - entry->key.priority = priority; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_pport = in_pport; - entry->key.acl.in_pport_mask = in_pport_mask; - - if (eth_src) - ether_addr_copy(entry->key.acl.eth_src, eth_src); - if (eth_src_mask) - ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); - if (eth_dst) - ether_addr_copy(entry->key.acl.eth_dst, eth_dst); - if (eth_dst_mask) - ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); - - entry->key.acl.eth_type = eth_type; - entry->key.acl.vlan_id = vlan_id; - entry->key.acl.vlan_id_mask = vlan_id_mask; - entry->key.acl.ip_proto = ip_proto; - entry->key.acl.ip_proto_mask = ip_proto_mask; - entry->key.acl.ip_tos = ip_tos; - entry->key.acl.ip_tos_mask = ip_tos_mask; - entry->key.acl.group_id = group_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_group_tbl_entry * -rocker_group_tbl_find(const struct rocker *rocker, - const struct rocker_group_tbl_entry *match) -{ - struct rocker_group_tbl_entry *found; - - hash_for_each_possible(rocker->group_tbl, found, - entry, match->group_id) { - if (found->group_id == match->group_id) - return found; - } - - return NULL; -} - -static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, - struct rocker_group_tbl_entry *entry) -{ - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - rocker_port_kfree(trans, entry->group_ids); - break; - default: - break; - } - rocker_port_kfree(trans, entry); -} - -static int rocker_group_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_group_tbl_entry_free(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; - } else { - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->group_tbl, &found->entry, found->group_id); - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_add, found, NULL, NULL); -} - -static int rocker_group_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - int err = 0; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; - } - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - rocker_group_tbl_entry_free(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_del, - found, NULL, NULL); - rocker_group_tbl_entry_free(trans, found); - } - - return err; -} - -static int rocker_group_tbl_do(struct rocker_port 
*rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_group_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_group_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_interface(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u32 out_pport, - int pop_vlan) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - entry->l2_interface.pop_vlan = pop_vlan; - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = group_id; - entry->group_count = group_count; - - entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - group_count, sizeof(u32)); - if (!entry->group_ids) { - rocker_port_kfree(trans, entry); - return -ENOMEM; - } - memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_flood(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - return rocker_group_l2_fan_out(rocker_port, trans, flags, - group_count, group_ids, - group_id); -} - -static int rocker_group_l3_unicast(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 index, const u8 *src_mac, const u8 *dst_mac, - __be16 vlan_id, bool ttl_check, u32 pport) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L3_UNICAST(index); - if (src_mac) - ether_addr_copy(entry->l3_unicast.eth_src, src_mac); - if (dst_mac) - ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); - entry->l3_unicast.vlan_id = vlan_id; - entry->l3_unicast.ttl_check = ttl_check; - entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_neigh_tbl_entry * -rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr) -{ - struct rocker_neigh_tbl_entry *found; - - hash_for_each_possible(rocker->neigh_tbl, found, - entry, be32_to_cpu(ip_addr)) - if (found->ip_addr == ip_addr) - return found; - - return NULL; -} - -static void _rocker_neigh_add(struct rocker *rocker, - struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (!switchdev_trans_ph_commit(trans)) - entry->index = rocker->neigh_tbl_next_index++; - if (switchdev_trans_ph_prepare(trans)) - return; - entry->ref_count++; - hash_add(rocker->neigh_tbl, &entry->entry, - be32_to_cpu(entry->ip_addr)); -} - -static void _rocker_neigh_del(struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (switchdev_trans_ph_prepare(trans)) - return; - if (--entry->ref_count == 0) { - hash_del(&entry->entry); - rocker_port_kfree(trans, entry); - } -} - -static void _rocker_neigh_update(struct 
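The _rocker_neigh_add()/_rocker_neigh_del() helpers above keep a single reference-counted entry per IPv4 neighbour, since both the /32 route to the neighbour and other routes' nexthops can share the same L3 unicast group. A sketch of that reference counting over a plain list; the names and the index allocation are illustrative:

#include <stdint.h>
#include <stdlib.h>

struct neigh_entry {
	struct neigh_entry *next;
	uint32_t ip_addr;	/* network-order IPv4 address */
	uint32_t index;		/* L3 unicast group index */
	int ref_count;
	uint8_t eth_dst[6];
};

static struct neigh_entry *neigh_find(struct neigh_entry *head, uint32_t ip)
{
	for (; head; head = head->next)
		if (head->ip_addr == ip)
			return head;
	return NULL;
}

/* Either take a reference on the existing entry or link a new one. */
static struct neigh_entry *neigh_get(struct neigh_entry **head,
				     uint32_t ip, uint32_t *next_index)
{
	struct neigh_entry *e = neigh_find(*head, ip);

	if (!e) {
		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->ip_addr = ip;
		e->index = (*next_index)++;
		e->next = *head;
		*head = e;
	}
	e->ref_count++;
	return e;
}

/* Drop a reference; unlink and free when the last user goes away. */
static void neigh_put(struct neigh_entry **head, struct neigh_entry *e)
{
	struct neigh_entry **pp;

	if (--e->ref_count > 0)
		return;
	for (pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	}
	free(e);
}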
rocker_neigh_tbl_entry *entry, - struct switchdev_trans *trans, - const u8 *eth_dst, bool ttl_check) -{ - if (eth_dst) { - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = ttl_check; - } else if (!switchdev_trans_ph_prepare(trans)) { - entry->ref_count++; - } -} - -static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be32 ip_addr, const u8 *eth_dst) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - __be16 eth_type = htons(ETH_P_IP); - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - u32 priority = 0; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - int err = 0; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = true; - _rocker_neigh_add(rocker, trans, entry); - } else if (removing) { - memcpy(entry, found, sizeof(*entry)); - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, eth_dst, true); - memcpy(entry, found, sizeof(*entry)); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (err) - goto err_out; - - /* For each active neighbor, we have an L3 unicast group and - * a /32 route to the neighbor, which uses the L3 unicast - * group. The L3 unicast group can also be referred to by - * other routes' nexthops. - */ - - err = rocker_group_l3_unicast(rocker_port, trans, flags, - entry->index, - rocker_port->dev->dev_addr, - entry->eth_dst, - rocker_port->internal_vlan_id, - entry->ttl_check, - rocker_port->pport); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) L3 unicast group index %d\n", - err, entry->index); - goto err_out; - } - - if (adding || removing) { - group_id = ROCKER_GROUP_L3_UNICAST(entry->index); - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, - eth_type, ip_addr, - inet_make_mask(32), - priority, goto_tbl, - group_id, flags); - - if (err) - netdev_err(rocker_port->dev, - "Error (%d) /32 unicast route %pI4 group 0x%08x\n", - err, &entry->ip_addr, group_id); - } - -err_out: - if (!adding) - rocker_port_kfree(trans, entry); - - return err; -} - -static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be32 ip_addr) -{ - struct net_device *dev = rocker_port->dev; - struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); - int err = 0; - - if (!n) { - n = neigh_create(&arp_tbl, &ip_addr, dev); - if (IS_ERR(n)) - return IS_ERR(n); - } - - /* If the neigh is already resolved, then go ahead and - * install the entry, otherwise start the ARP process to - * resolve the neigh. 
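rocker_port_ipv4_neigh() and rocker_port_ipv4_nh() above reduce "was the entry found" and "is this an add or a remove" to three mutually exclusive cases before touching the tables. A compact restatement of that classification:

#include <stdbool.h>

enum neigh_op { NEIGH_NONE, NEIGH_ADD, NEIGH_UPDATE, NEIGH_REMOVE };

/* 'found' is whether the entry already exists, 'add' is whether the caller
 * asked to add (vs. remove). Exactly one case applies. */
static enum neigh_op neigh_classify(bool found, bool add)
{
	if (!found && add)
		return NEIGH_ADD;	/* brand-new neighbour */
	if (found && add)
		return NEIGH_UPDATE;	/* refresh MAC / take an extra reference */
	if (found && !add)
		return NEIGH_REMOVE;	/* drop a reference */
	return NEIGH_NONE;		/* removing something never added: -ENOENT */
}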
- */ - - if (n->nud_state & NUD_VALID) - err = rocker_port_ipv4_neigh(rocker_port, trans, 0, - ip_addr, n->ha); - else - neigh_event_send(n, NULL); - - neigh_release(n); - return err; -} - -static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be32 ip_addr, u32 *index) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - bool resolved = true; - int err = 0; - - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - if (found) - *index = found->index; - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - _rocker_neigh_add(rocker, trans, entry); - *index = entry->index; - resolved = false; - } else if (removing) { - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, NULL, false); - resolved = !is_zero_ether_addr(found->eth_dst); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (!adding) - rocker_port_kfree(trans, entry); - - if (err) - return err; - - /* Resolved means neigh ip_addr is resolved to neigh mac. */ - - if (!resolved) - err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr); - - return err; -} - -static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be16 vlan_id) -{ - struct rocker_port *p; - const struct rocker *rocker = rocker_port->rocker; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 *group_ids; - u8 group_count = 0; - int err = 0; - int i; - - group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - rocker->port_count, sizeof(u32)); - if (!group_ids) - return -ENOMEM; - - /* Adjust the flood group for this VLAN. The flood group - * references an L2 interface group for each port in this - * VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (!p) - continue; - if (!rocker_port_is_bridged(p)) - continue; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { - group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); - } - } - - /* If there are no bridged ports in this VLAN, we're done */ - if (group_count == 0) - goto no_ports_in_vlan; - - err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id, - group_count, group_ids, group_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - -no_ports_in_vlan: - rocker_port_kfree(trans, group_ids); - return err; -} - -static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, bool pop_vlan) -{ - const struct rocker *rocker = rocker_port->rocker; - struct rocker_port *p; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_pport; - int ref = 0; - int err; - int i; - - /* An L2 interface group for this port in this VLAN, but - * only when port STP state is LEARNING|FORWARDING. 
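rocker_port_vlan_flood_group() above rebuilds the VLAN's flood group from scratch: every bridged port whose VLAN bitmap carries the VID contributes one L2 interface group. A sketch of that membership computation; the group-ID packing here is illustrative, not the driver's ROCKER_GROUP_L2_INTERFACE encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct port {
	uint32_t pport;		/* 1-based physical port number */
	bool bridged;
	unsigned char vlan_bitmap[4096 / 8];
};

static bool port_has_vlan(const struct port *p, uint16_t vid)
{
	return p->vlan_bitmap[vid / 8] & (1u << (vid % 8));
}

/* Illustrative packing: VLAN in the high bits, port in the low bits. */
static uint32_t l2_interface_group_id(uint16_t vid, uint32_t pport)
{
	return ((uint32_t)vid << 16) | (pport & 0xffff);
}

/* Collect one L2 interface group per bridged member port of this VLAN.
 * 'group_ids' must have room for n_ports entries. Returns the member
 * count; 0 means the flood group can be skipped entirely. */
static size_t vlan_flood_members(const struct port *ports, size_t n_ports,
				 uint16_t vid, uint32_t *group_ids)
{
	size_t count = 0;

	for (size_t i = 0; i < n_ports; i++) {
		if (!ports[i].bridged || !port_has_vlan(&ports[i], vid))
			continue;
		group_ids[count++] = l2_interface_group_id(vid, ports[i].pport);
	}
	return count;
}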
- */ - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) { - out_pport = rocker_port->pport; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - /* An L2 interface group for this VLAN to CPU port. - * Add when first port joins this VLAN and destroy when - * last port leaves this VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) - ref++; - } - - if ((!adding || ref != 1) && (adding || ref != 0)) - return 0; - - out_pport = 0; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for CPU port\n", err); - return err; - } - - return 0; -} - -static struct rocker_ctrl { - const u8 *eth_dst; - const u8 *eth_dst_mask; - __be16 eth_type; - bool acl; - bool bridge; - bool term; - bool copy_to_cpu; -} rocker_ctrls[] = { - [ROCKER_CTRL_LINK_LOCAL_MCAST] = { - /* pass link local multicast pkts up to CPU for filtering */ - .eth_dst = ll_mac, - .eth_dst_mask = ll_mask, - .acl = true, - }, - [ROCKER_CTRL_LOCAL_ARP] = { - /* pass local ARP pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .eth_type = htons(ETH_P_ARP), - .acl = true, - }, - [ROCKER_CTRL_IPV4_MCAST] = { - /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ - .eth_dst = ipv4_mcast, - .eth_dst_mask = ipv4_mask, - .eth_type = htons(ETH_P_IP), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_IPV6_MCAST] = { - /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ - .eth_dst = ipv6_mcast, - .eth_dst_mask = ipv6_mask, - .eth_type = htons(ETH_P_IPV6), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_BRIDGING] = { - /* flood any pkts on vlan */ - .bridge = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_OVS] = { - /* pass all pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .acl = true, - }, -}; - -static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport = rocker_port->pport; - u32 in_pport_mask = 0xffffffff; - u32 out_pport = 0; - const u8 *eth_src = NULL; - const u8 *eth_src_mask = NULL; - __be16 vlan_id_mask = htons(0xffff); - u8 ip_proto = 0; - u8 ip_proto_mask = 0; - u8 ip_tos = 0; - u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - int err; - - err = rocker_flow_tbl_acl(rocker_port, trans, flags, - in_pport, in_pport_mask, - eth_src, eth_src_mask, - ctrl->eth_dst, ctrl->eth_dst_mask, - ctrl->eth_type, - vlan_id, vlan_id_mask, - ip_proto, ip_proto_mask, - ip_tos, ip_tos_mask, - group_id); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, - const struct rocker_ctrl *ctrl, - __be16 vlan_id) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 tunnel_id = 0; - int err; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, - ctrl->eth_dst, ctrl->eth_dst_mask, - vlan_id, tunnel_id, - goto_tbl, 
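In rocker_port_vlan_l2_groups() above, the CPU-facing L2 interface group is created only when the first port joins a VLAN and destroyed only when the last one leaves; the early return condition ((!adding || ref != 1) && (adding || ref != 0)) encodes exactly that. An equivalent, more explicit form:

#include <stdbool.h>

/* 'adding' is whether this port is joining the VLAN, 'members' is how many
 * ports carry the VLAN after this change (the caller flips the bitmap bit
 * before counting). The CPU-facing group only changes at the edges. */
static bool cpu_l2_group_needs_update(bool adding, int members)
{
	if (adding)
		return members == 1;	/* first port joined: create the group */
	return members == 0;		/* last port left: destroy the group */
}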
group_id, ctrl->copy_to_cpu); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 vlan_id_mask = htons(0xffff); - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - ctrl->eth_type, ctrl->eth_dst, - ctrl->eth_dst_mask, vlan_id, - vlan_id_mask, ctrl->copy_to_cpu, - flags); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - if (ctrl->acl) - return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags, - ctrl, vlan_id); - if (ctrl->bridge) - return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags, - ctrl, vlan_id); - - if (ctrl->term) - return rocker_port_ctrl_vlan_term(rocker_port, trans, flags, - ctrl, vlan_id); - - return -EOPNOTSUPP; -} - -static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - int err = 0; - int i; - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (rocker_port->ctrls[i]) { - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - &rocker_ctrls[i], vlan_id); - if (err) - return err; - } - } - - return err; -} - -static int rocker_port_ctrl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - ctrl, htons(vid)); - if (err) - break; - } - - return err; -} - -static int rocker_port_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, u16 vid) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_pport = rocker_port->pport; - __be16 vlan_id = htons(vid); - __be16 vlan_id_mask = htons(0xffff); - __be16 internal_vlan_id; - bool untagged; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - int err; - - internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); - - if (adding && test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already added */ - else if (!adding && !test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already removed */ - - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - if (adding) { - err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port ctrl vlan add\n", err); - goto err_out; - } - } - - err = rocker_port_vlan_l2_groups(rocker_port, trans, flags, - internal_vlan_id, untagged); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 groups\n", err); - goto err_out; - } - - err = rocker_port_vlan_flood_group(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - goto err_out; - } - - err = rocker_flow_tbl_vlan(rocker_port, trans, flags, - in_pport, vlan_id, vlan_id_mask, - goto_tbl, untagged, 
internal_vlan_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN table\n", err); - -err_out: - if (switchdev_trans_ph_prepare(trans)) - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - return err; -} - -static int rocker_port_ig_tbl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - enum rocker_of_dpa_table_id goto_tbl; - u32 in_pport; - u32 in_pport_mask; - int err; - - /* Normal Ethernet Frames. Matches pkts from any local physical - * ports. Goto VLAN tbl. - */ - - in_pport = 0; - in_pport_mask = 0xffff0000; - goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; - - err = rocker_flow_tbl_ig_port(rocker_port, trans, flags, - in_pport, in_pport_mask, - goto_tbl); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) ingress port table entry\n", err); - - return err; -} - -struct rocker_fdb_learn_work { - struct work_struct work; - struct rocker_port *rocker_port; - struct switchdev_trans *trans; - int flags; - u8 addr[ETH_ALEN]; - u16 vid; -}; - -static void rocker_port_fdb_learn_work(struct work_struct *work) -{ - const struct rocker_fdb_learn_work *lw = - container_of(work, struct rocker_fdb_learn_work, work); - bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); - bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); - struct switchdev_notifier_fdb_info info; - - info.addr = lw->addr; - info.vid = lw->vid; - - rtnl_lock(); - if (learned && removing) - call_switchdev_notifiers(SWITCHDEV_FDB_DEL, - lw->rocker_port->dev, &info.info); - else if (learned && !removing) - call_switchdev_notifiers(SWITCHDEV_FDB_ADD, - lw->rocker_port->dev, &info.info); - rtnl_unlock(); - - rocker_port_kfree(lw->trans, work); -} - -static int rocker_port_fdb_learn(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *addr, __be16 vlan_id) -{ - struct rocker_fdb_learn_work *lw; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_pport = rocker_port->pport; - u32 tunnel_id = 0; - u32 group_id = ROCKER_GROUP_NONE; - bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); - bool copy_to_cpu = false; - int err; - - if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - - if (!(flags & ROCKER_OP_FLAG_REFRESH)) { - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr, - NULL, vlan_id, tunnel_id, goto_tbl, - group_id, copy_to_cpu); - if (err) - return err; - } - - if (!syncing) - return 0; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw)); - if (!lw) - return -ENOMEM; - - INIT_WORK(&lw->work, rocker_port_fdb_learn_work); - - lw->rocker_port = rocker_port; - lw->trans = trans; - lw->flags = flags; - ether_addr_copy(lw->addr, addr); - lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); - - if (switchdev_trans_ph_prepare(trans)) - rocker_port_kfree(trans, lw); - else - schedule_work(&lw->work); - - return 0; -} - -static struct rocker_fdb_tbl_entry * -rocker_fdb_tbl_find(const struct rocker *rocker, - const struct rocker_fdb_tbl_entry *match) -{ - struct rocker_fdb_tbl_entry *found; - - hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) - return found; - - return NULL; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags) -{ - struct rocker *rocker = 
rocker_port->rocker; - struct rocker_fdb_tbl_entry *fdb; - struct rocker_fdb_tbl_entry *found; - bool removing = (flags & ROCKER_OP_FLAG_REMOVE); - unsigned long lock_flags; - - fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb)); - if (!fdb) - return -ENOMEM; - - fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->touched = jiffies; - fdb->key.rocker_port = rocker_port; - ether_addr_copy(fdb->key.addr, addr); - fdb->key.vlan_id = vlan_id; - fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - found = rocker_fdb_tbl_find(rocker, fdb); - - if (found) { - found->touched = jiffies; - if (removing) { - rocker_port_kfree(trans, fdb); - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - } else if (!removing) { - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->fdb_tbl, &fdb->entry, - fdb->key_crc32); - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - /* Check if adding and already exists, or removing and can't find */ - if (!found != !removing) { - rocker_port_kfree(trans, fdb); - if (!found && removing) - return 0; - /* Refreshing existing to update aging timers */ - flags |= ROCKER_OP_FLAG_REFRESH; - } - - return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id); -} - -static int rocker_port_fdb_flush(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - unsigned long lock_flags; - struct hlist_node *tmp; - int bkt; - int err = 0; - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) - return 0; - - flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - if (!found->learned) - continue; - err = rocker_port_fdb_learn(rocker_port, trans, flags, - found->key.addr, - found->key.vlan_id); - if (err) - goto err_out; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - -err_out: - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static void rocker_fdb_cleanup(unsigned long data) -{ - struct rocker *rocker = (struct rocker *)data; - struct rocker_port *rocker_port; - struct rocker_fdb_tbl_entry *entry; - struct hlist_node *tmp; - unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; - unsigned long expires; - unsigned long lock_flags; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE | - ROCKER_OP_FLAG_LEARNED; - int bkt; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { - if (!entry->learned) - continue; - rocker_port = entry->key.rocker_port; - expires = entry->touched + rocker_port->ageing_time; - if (time_before_eq(expires, jiffies)) { - rocker_port_fdb_learn(rocker_port, NULL, - flags, entry->key.addr, - entry->key.vlan_id); - hash_del(&entry->entry); - } else if (time_before(expires, next_timer)) { - next_timer = expires; - } - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer)); -} - -static int rocker_port_router_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 eth_type; - const u8 
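rocker_fdb_cleanup() above sweeps the learned FDB entries, drops the ones whose ageing time has passed, and re-arms its timer for the earliest remaining expiry, defaulting to a short minimum interval when nothing is close to expiring. A userspace sketch of that sweep in plain time_t arithmetic; the constant and types are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <time.h>

#define MIN_AGEING_SECS 10	/* illustrative default re-arm delay */

struct fdb_entry {
	bool learned;
	time_t touched;		/* last time traffic refreshed the entry */
	time_t ageing_time;	/* ageing period (per port in the driver) */
	bool dead;		/* marked instead of unlinking, for brevity */
};

/* Expire stale learned entries and return when to run the sweep next. */
static time_t fdb_cleanup(struct fdb_entry *tbl, size_t n, time_t now)
{
	time_t next_run = now + MIN_AGEING_SECS;

	for (size_t i = 0; i < n; i++) {
		time_t expires;

		if (!tbl[i].learned || tbl[i].dead)
			continue;
		expires = tbl[i].touched + tbl[i].ageing_time;
		if (expires <= now)
			tbl[i].dead = true;	/* driver also notifies the bridge */
		else if (expires < next_run)
			next_run = expires;	/* wake when the next entry ages out */
	}
	return next_run;
}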
*dst_mac_mask = ff_mac; - __be16 vlan_id_mask = htons(0xffff); - bool copy_to_cpu = false; - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - eth_type = htons(ETH_P_IP); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - if (err) - return err; - - eth_type = htons(ETH_P_IPV6); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - - return err; -} - -static int rocker_port_fwding(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - bool pop_vlan; - u32 out_pport; - __be16 vlan_id; - u16 vid; - int err; - - /* Port will be forwarding-enabled if its STP state is LEARNING - * or FORWARDING. Traffic from CPU can still egress, regardless of - * port STP state. Use L2 interface group on port VLANs as a way - * to toggle port forwarding: if forwarding is disabled, L2 - * interface group will not exist. - */ - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - flags |= ROCKER_OP_FLAG_REMOVE; - - out_pport = rocker_port->pport; - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan_id = htons(vid); - pop_vlan = rocker_vlan_id_is_internal(vlan_id); - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - return 0; -} - -static int rocker_port_stp_update(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u8 state) -{ - bool want[ROCKER_CTRL_MAX] = { 0, }; - bool prev_ctrls[ROCKER_CTRL_MAX]; - u8 uninitialized_var(prev_state); - int err; - int i; - - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls)); - prev_state = rocker_port->stp_state; - } - - if (rocker_port->stp_state == state) - return 0; - - rocker_port->stp_state = state; - - switch (state) { - case BR_STATE_DISABLED: - /* port is completely disabled */ - break; - case BR_STATE_LISTENING: - case BR_STATE_BLOCKING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - break; - case BR_STATE_LEARNING: - case BR_STATE_FORWARDING: - if (!rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - want[ROCKER_CTRL_IPV4_MCAST] = true; - want[ROCKER_CTRL_IPV6_MCAST] = true; - if (rocker_port_is_bridged(rocker_port)) - want[ROCKER_CTRL_DFLT_BRIDGING] = true; - else if (rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_DFLT_OVS] = true; - else - want[ROCKER_CTRL_LOCAL_ARP] = true; - break; - } - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (want[i] != rocker_port->ctrls[i]) { - int ctrl_flags = flags | - (want[i] ? 
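rocker_port_stp_update() above derives a per-state set of wanted control-plane entries and then programs only the difference against what the port already has installed. A sketch of the want-set computation, following the switch statement above; the enum names are illustrative stand-ins for the driver's ROCKER_CTRL_* indices:

#include <stdbool.h>
#include <string.h>

enum ctrl { CTRL_LINK_LOCAL_MCAST, CTRL_LOCAL_ARP, CTRL_IPV4_MCAST,
	    CTRL_IPV6_MCAST, CTRL_DFLT_BRIDGING, CTRL_DFLT_OVS, CTRL_MAX };

enum stp_state { STP_DISABLED, STP_BLOCKING, STP_LISTENING,
		 STP_LEARNING, STP_FORWARDING };

static void stp_want_ctrls(enum stp_state state, bool bridged, bool ovsed,
			   bool want[CTRL_MAX])
{
	memset(want, 0, CTRL_MAX * sizeof(bool));

	switch (state) {
	case STP_DISABLED:
		break;				/* trap nothing: port is off */
	case STP_LISTENING:
	case STP_BLOCKING:
		want[CTRL_LINK_LOCAL_MCAST] = true;	/* still need BPDUs etc. */
		break;
	case STP_LEARNING:
	case STP_FORWARDING:
		if (!ovsed)
			want[CTRL_LINK_LOCAL_MCAST] = true;
		want[CTRL_IPV4_MCAST] = true;
		want[CTRL_IPV6_MCAST] = true;
		if (bridged)
			want[CTRL_DFLT_BRIDGING] = true;
		else if (ovsed)
			want[CTRL_DFLT_OVS] = true;
		else
			want[CTRL_LOCAL_ARP] = true;
		break;
	}
}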
0 : ROCKER_OP_FLAG_REMOVE); - err = rocker_port_ctrl(rocker_port, trans, ctrl_flags, - &rocker_ctrls[i]); - if (err) - goto err_out; - rocker_port->ctrls[i] = want[i]; - } - } - - err = rocker_port_fdb_flush(rocker_port, trans, flags); - if (err) - goto err_out; - - err = rocker_port_fwding(rocker_port, trans, flags); - -err_out: - if (switchdev_trans_ph_prepare(trans)) { - memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); - rocker_port->stp_state = prev_state; - } - - return err; -} - -static int rocker_port_fwd_enable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will enable port */ - return 0; - - /* port is not bridged, so simulate going to FORWARDING state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_FORWARDING); -} - -static int rocker_port_fwd_disable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will disable port */ - return 0; - - /* port is not bridged, so simulate going to DISABLED state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_DISABLED); -} - -static struct rocker_internal_vlan_tbl_entry * -rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex) -{ - struct rocker_internal_vlan_tbl_entry *found; - - hash_for_each_possible(rocker->internal_vlan_tbl, found, - entry, ifindex) { - if (found->ifindex == ifindex) - return found; - } - - return NULL; -} - -static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *entry; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - int i; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - entry->ifindex = ifindex; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (found) { - kfree(entry); - goto found; - } - - found = entry; - hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); - - for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { - if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) - continue; - found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); - goto found; - } - - netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); - -found: - found->ref_count++; - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); - - return found->vlan_id; -} - -static void -rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - unsigned long bit; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (!found) { - netdev_err(rocker_port->dev, - "ifindex (%d) not found in internal VLAN tbl\n", - ifindex); - goto not_found; - } - - if (--found->ref_count <= 0) { - bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; - clear_bit(bit, rocker->internal_vlan_bitmap); - hash_del(&found->entry); - kfree(found); - } - -not_found: - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); -} - -static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, - struct switchdev_trans *trans, __be32 dst, - int dst_len, const struct fib_info *fi, - u32 
tb_id, int flags) -{ - const struct fib_nh *nh; - __be16 eth_type = htons(ETH_P_IP); - __be32 dst_mask = inet_make_mask(dst_len); - __be16 internal_vlan_id = rocker_port->internal_vlan_id; - u32 priority = fi->fib_priority; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - bool nh_on_port; - bool has_gw; - u32 index; - int err; - - /* XXX support ECMP */ - - nh = fi->fib_nh; - nh_on_port = (fi->fib_dev == rocker_port->dev); - has_gw = !!nh->nh_gw; - - if (has_gw && nh_on_port) { - err = rocker_port_ipv4_nh(rocker_port, trans, flags, - nh->nh_gw, &index); - if (err) - return err; - - group_id = ROCKER_GROUP_L3_UNICAST(index); - } else { - /* Send to CPU for processing */ - group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); - } - - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst, - dst_mask, priority, goto_tbl, - group_id, flags); - if (err) - netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", - err, &dst); - - return err; -} - -/***************** - * Net device ops - *****************/ - -static int rocker_port_open(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_dma_rings_init(rocker_port); - if (err) - return err; - - err = request_irq(rocker_msix_tx_vector(rocker_port), - rocker_tx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign tx irq\n"); - goto err_request_tx_irq; - } - - err = request_irq(rocker_msix_rx_vector(rocker_port), - rocker_rx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign rx irq\n"); - goto err_request_rx_irq; - } - - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - if (err) - goto err_fwd_enable; - - napi_enable(&rocker_port->napi_tx); - napi_enable(&rocker_port->napi_rx); - if (!dev->proto_down) - rocker_port_set_enable(rocker_port, true); - netif_start_queue(dev); - return 0; - -err_fwd_enable: - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); -err_request_rx_irq: - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); -err_request_tx_irq: - rocker_port_dma_rings_fini(rocker_port); - return err; -} - -static int rocker_port_stop(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - netif_stop_queue(dev); - rocker_port_set_enable(rocker_port, false); - napi_disable(&rocker_port->napi_rx); - napi_disable(&rocker_port->napi_tx); - rocker_port_fwd_disable(rocker_port, NULL, - ROCKER_OP_FLAG_NOWAIT); - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); - rocker_port_dma_rings_fini(rocker_port); - - return 0; -} - -static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; - struct rocker_tlv *attr; - int rem; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); - if (!attrs[ROCKER_TLV_TX_FRAGS]) - return; - rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { - const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; - dma_addr_t dma_handle; - size_t len; - - if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) - continue; - rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, - attr); - if 
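rocker_port_fib_ipv4() above points a route at an L3 unicast group only when its nexthop has a gateway reachable through this port; otherwise the route's group is the internal-VLAN L2 interface group with port 0, i.e. punt to the CPU (ECMP is explicitly left unsupported). A sketch of that selection with illustrative group-ID helpers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative group-ID encodings; the driver also packs type bits. */
static uint32_t l3_unicast_group(uint32_t nh_index)
{
	return 0x20000000u | nh_index;
}

static uint32_t l2_interface_group(uint16_t vlan)
{
	return (uint32_t)vlan << 16;	/* port 0: deliver to the CPU */
}

/* Decide where a unicast route should send matching packets. */
static uint32_t route_group_id(bool nh_has_gateway, bool nh_on_this_port,
			       uint32_t nh_index, uint16_t internal_vlan)
{
	if (nh_has_gateway && nh_on_this_port)
		return l3_unicast_group(nh_index);	/* hardware-forwarded nexthop */
	return l2_interface_group(internal_vlan);
}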
(!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || - !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) - continue; - dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); - len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); - pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); - } -} - -static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - char *buf, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - struct rocker_tlv *frag; - - dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); - return -EIO; - } - frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); - if (!frag) - goto unmap_frag; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, - dma_handle)) - goto nest_cancel; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, - buf_len)) - goto nest_cancel; - rocker_tlv_nest_end(desc_info, frag); - return 0; - -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frag); -unmap_frag: - pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); - return -EMSGSIZE; -} - -static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_tlv *frags; - int i; - int err; - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (unlikely(!desc_info)) { - if (net_ratelimit()) - netdev_err(dev, "tx ring full when queue awake\n"); - return NETDEV_TX_BUSY; - } - - rocker_desc_cookie_ptr_set(desc_info, skb); - - frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); - if (!frags) - goto out; - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb->data, skb_headlen(skb)); - if (err) - goto nest_cancel; - if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { - err = skb_linearize(skb); - if (err) - goto unmap_frags; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb_frag_address(frag), - skb_frag_size(frag)); - if (err) - goto unmap_frags; - } - rocker_tlv_nest_end(desc_info, frags); - - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (!desc_info) - netif_stop_queue(dev); - - return NETDEV_TX_OK; - -unmap_frags: - rocker_tx_desc_frags_unmap(rocker_port, desc_info); -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frags); -out: - dev_kfree_skb(skb); - dev->stats.tx_dropped++; - - return NETDEV_TX_OK; -} - -static int rocker_port_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); - if (err) - return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - return 0; -} - -static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int running = netif_running(dev); - int err; - -#define 
ROCKER_PORT_MIN_MTU 68 -#define ROCKER_PORT_MAX_MTU 9000 - - if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) - return -EINVAL; - - if (running) - rocker_port_stop(dev); - - netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); - dev->mtu = new_mtu; - - err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); - if (err) - return err; - - if (running) - err = rocker_port_open(dev); - - return err; -} - -static int rocker_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct port_name name = { .buf = buf, .len = len }; - int err; - - err = rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_phys_name_proc, - &name); - - return err ? -EOPNOTSUPP : 0; -} - -static int rocker_port_change_proto_down(struct net_device *dev, - bool proto_down) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_port->dev->flags & IFF_UP) - rocker_port_set_enable(rocker_port, !proto_down); - rocker_port->dev->proto_down = proto_down; - return 0; -} - -static void rocker_port_neigh_destroy(struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(n->dev); - int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - rocker_port_ipv4_neigh(rocker_port, NULL, - flags, ip_addr, n->ha); -} - -static const struct net_device_ops rocker_port_netdev_ops = { - .ndo_open = rocker_port_open, - .ndo_stop = rocker_port_stop, - .ndo_start_xmit = rocker_port_xmit, - .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_change_mtu = rocker_port_change_mtu, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, - .ndo_get_phys_port_name = rocker_port_get_phys_port_name, - .ndo_change_proto_down = rocker_port_change_proto_down, - .ndo_neigh_destroy = rocker_port_neigh_destroy, -}; - -/******************** - * swdev interface - ********************/ - -static int rocker_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - const struct rocker *rocker = rocker_port->rocker; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(rocker->hw.id); - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = rocker_port->brport_flags; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - unsigned long brport_flags) -{ - unsigned long orig_flags; - int err = 0; - - orig_flags = rocker_port->brport_flags; - rocker_port->brport_flags = brport_flags; - if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) - err = rocker_port_set_learning(rocker_port, trans); - - if (switchdev_trans_ph_prepare(trans)) - rocker_port->brport_flags = orig_flags; - - return err; -} - -static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 ageing_time) -{ - if (!switchdev_trans_ph_prepare(trans)) { - rocker_port->ageing_time = clock_t_to_jiffies(ageing_time); - 
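rocker_port_change_mtu() above bounds the requested MTU to 68..9000, quiesces the port if it was running, pushes the new MTU to hardware, and reopens. A sketch of the same sequence with hypothetical stop/open/set-MTU helpers standing in for the driver's:

#include <errno.h>
#include <stdbool.h>

#define PORT_MIN_MTU 68		/* matches the driver's bounds */
#define PORT_MAX_MTU 9000

struct port_dev { int mtu; bool running; };

/* Hypothetical netdev/hardware hooks for the sketch. */
static void port_stop(struct port_dev *d)		{ d->running = false; }
static int  port_open(struct port_dev *d)		{ d->running = true; return 0; }
static int  port_hw_set_mtu(struct port_dev *d, int mtu) { (void)d; (void)mtu; return 0; }

static int port_change_mtu(struct port_dev *d, int new_mtu)
{
	bool was_running = d->running;
	int err;

	if (new_mtu < PORT_MIN_MTU || new_mtu > PORT_MAX_MTU)
		return -EINVAL;

	if (was_running)
		port_stop(d);		/* quiesce rings before resizing */

	d->mtu = new_mtu;
	err = port_hw_set_mtu(d, new_mtu);
	if (err)
		return err;

	if (was_running)
		err = port_open(d);	/* re-arm rings at the new size */
	return err;
}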
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies); - } - - return 0; -} - -static int rocker_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = rocker_port_stp_update(rocker_port, trans, 0, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_port_brport_flags_set(rocker_port, trans, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = rocker_port_bridge_ageing_time(rocker_port, trans, - attr->u.ageing_time); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u16 vid, u16 flags) -{ - int err; - - /* XXX deal with flags for PVID and untagged */ - - err = rocker_port_vlan(rocker_port, trans, 0, vid); - if (err) - return err; - - err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); - if (err) - rocker_port_vlan(rocker_port, trans, - ROCKER_OP_FLAG_REMOVE, vid); - - return err; -} - -static int rocker_port_vlans_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_add(rocker_port, trans, - vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = 0; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, trans, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_del(struct rocker_port *rocker_port, - u16 vid, u16 flags) -{ - int err; - - err = rocker_port_router_mac(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, vid); -} - -static int rocker_port_vlans_del(struct rocker_port *rocker_port, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_del(rocker_port, vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int 
flags = ROCKER_OP_FLAG_REMOVE; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_del(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, NULL, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, - ROCKER_OP_FLAG_REMOVE); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_del(rocker_port, NULL, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - struct hlist_node *tmp; - unsigned long lock_flags; - int bkt; - int err = 0; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - ether_addr_copy(fdb->addr, found->key.addr); - fdb->ndm_state = NUD_REACHABLE; - fdb->vid = rocker_port_vlan_to_vid(rocker_port, - found->key.vlan_id); - err = cb(&fdb->obj); - if (err) - break; - } - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static int rocker_port_vlan_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan->flags = 0; - if (rocker_vlan_id_is_internal(htons(vid))) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - vlan->vid_begin = vlan->vid_end = vid; - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - -static int rocker_port_obj_dump(struct net_device *dev, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_dump(rocker_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); - break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlan_dump(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), cb); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static const struct switchdev_ops rocker_port_switchdev_ops = { - .switchdev_port_attr_get = rocker_port_attr_get, - .switchdev_port_attr_set = rocker_port_attr_set, - .switchdev_port_obj_add = rocker_port_obj_add, - .switchdev_port_obj_del = rocker_port_obj_del, - .switchdev_port_obj_dump = rocker_port_obj_dump, -}; - -/******************** - * ethtool interface - ********************/ - -static int rocker_port_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); -} - -static int rocker_port_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); -} - 
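The FDB and VLAN dump helpers removed above follow a callback-driven pattern: the driver walks its own state (an FDB hash table, a VLAN bitmap), fills in one switchdev object per entry, and hands it back through the caller-supplied cb, aborting on the first non-zero return. The following is a minimal standalone sketch of that pattern in plain userspace C, not driver code; every name in it (fdb_entry, dump_cb_t, count_entries) is hypothetical.

/* Standalone model of the switchdev-style dump pattern used by
 * rocker_port_fdb_dump()/rocker_port_vlan_dump(): walk private state,
 * report each entry through a callback, stop on the first error.
 * All names here are hypothetical; this is not kernel code.
 */
#include <stdio.h>

struct fdb_entry {
	unsigned char addr[6];
	unsigned short vid;
};

typedef int (*dump_cb_t)(const struct fdb_entry *entry, void *priv);

static int fdb_dump(const struct fdb_entry *tbl, int n,
		    dump_cb_t cb, void *priv)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = cb(&tbl[i], priv);	/* mirrors err = cb(&fdb->obj) */
		if (err)
			return err;		/* first error aborts the dump */
	}
	return 0;
}

static int count_entries(const struct fdb_entry *entry, void *priv)
{
	(*(int *)priv)++;
	return 0;
}

int main(void)
{
	struct fdb_entry tbl[] = {
		{ { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 }, 1 },
		{ { 0x52, 0x54, 0x00, 0x12, 0x34, 0x57 }, 100 },
	};
	int count = 0;

	fdb_dump(tbl, 2, count_entries, &count);
	printf("%d entries dumped\n", count);
	return 0;
}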
-static void rocker_port_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); -} - -static struct rocker_port_stats { - char str[ETH_GSTRING_LEN]; - int type; -} rocker_port_stats[] = { - { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, - { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, - { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, - { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, - - { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, - { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, - { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, - { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, -}; - -#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) - -static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int -rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_stats; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) - return -EMSGSIZE; - - cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_stats) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, cmd_stats); - - return 0; -} - -static int -rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; - const struct rocker_tlv *pattr; - u32 pport; - u64 *data = priv; - int i; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) - return -EIO; - - pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); - if (pport != rocker_port->pport) - return -EIO; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - pattr = stats_attrs[rocker_port_stats[i].type]; - if (!pattr) - continue; - - data[i] = rocker_tlv_get_u64(pattr); - } - - return 0; -} - -static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, - void *priv) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_stats_prep, NULL, - rocker_cmd_get_port_stats_ethtool_proc, - priv); -} - -static void rocker_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { - int i; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) - data[i] = 0; - } -} - -static int rocker_port_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ROCKER_PORT_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static const struct ethtool_ops rocker_port_ethtool_ops 
= { - .get_settings = rocker_port_get_settings, - .set_settings = rocker_port_set_settings, - .get_drvinfo = rocker_port_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_strings = rocker_port_get_strings, - .get_ethtool_stats = rocker_port_get_stats, - .get_sset_count = rocker_port_get_sset_count, -}; - -/***************** - * NAPI interface - *****************/ - -static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_tx); -} - -static int rocker_port_poll_tx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Cleanup tx descriptors */ - while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { - struct sk_buff *skb; - - err = rocker_desc_err(desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "tx desc received with err %d\n", - err); - rocker_tx_desc_frags_unmap(rocker_port, desc_info); - - skb = rocker_desc_cookie_ptr_get(desc_info); - if (err == 0) { - rocker_port->dev->stats.tx_packets++; - rocker_port->dev->stats.tx_bytes += skb->len; - } else { - rocker_port->dev->stats.tx_errors++; - } - - dev_kfree_skb_any(skb); - credits++; - } - - if (credits && netif_queue_stopped(rocker_port->dev)) - netif_wake_queue(rocker_port->dev); - - napi_complete(napi); - rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); - - return 0; -} - -static int rocker_port_rx_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - size_t rx_len; - u16 rx_flags = 0; - - if (!skb) - return -ENOENT; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) - return -EINVAL; - if (attrs[ROCKER_TLV_RX_FLAGS]) - rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); - - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - - rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); - skb_put(skb, rx_len); - skb->protocol = eth_type_trans(skb, rocker_port->dev); - - if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) - skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; - - rocker_port->dev->stats.rx_packets++; - rocker_port->dev->stats.rx_bytes += skb->len; - - netif_receive_skb(skb); - - return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); -} - -static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_rx); -} - -static int rocker_port_poll_rx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Process rx descriptors */ - while (credits < budget && - (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "rx desc received with err %d\n", - err); - } else { - err = rocker_port_rx_proc(rocker, rocker_port, - desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "rx processing failed with err %d\n", - err); - } - if (err) - rocker_port->dev->stats.rx_errors++; - - 
rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); - credits++; - } - - if (credits < budget) - napi_complete(napi); - - rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); - - return credits; -} - -/***************** - * PCI driver ops - *****************/ - -static void rocker_carrier_init(const struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); - bool link_up; - - link_up = link_status & (1 << rocker_port->pport); - if (link_up) - netif_carrier_on(rocker_port->dev); - else - netif_carrier_off(rocker_port->dev); -} - -static void rocker_remove_ports(const struct rocker *rocker) -{ - struct rocker_port *rocker_port; - int i; - - for (i = 0; i < rocker->port_count; i++) { - rocker_port = rocker->ports[i]; - if (!rocker_port) - continue; - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); - unregister_netdev(rocker_port->dev); - free_netdev(rocker_port->dev); - } - kfree(rocker->ports); -} - -static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_cmd_get_port_settings_macaddr(rocker_port, - rocker_port->dev->dev_addr); - if (err) { - dev_warn(&pdev->dev, "failed to get mac address, using random\n"); - eth_hw_addr_random(rocker_port->dev); - } -} - -static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_port *rocker_port; - struct net_device *dev; - u16 untagged_vid = 0; - int err; - - dev = alloc_etherdev(sizeof(struct rocker_port)); - if (!dev) - return -ENOMEM; - rocker_port = netdev_priv(dev); - rocker_port->dev = dev; - rocker_port->rocker = rocker; - rocker_port->port_number = port_number; - rocker_port->pport = port_number + 1; - rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; - rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; - - rocker_port_dev_addr_init(rocker_port); - dev->netdev_ops = &rocker_port_netdev_ops; - dev->ethtool_ops = &rocker_port_ethtool_ops; - dev->switchdev_ops = &rocker_port_switchdev_ops; - netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, - NAPI_POLL_WEIGHT); - netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, - NAPI_POLL_WEIGHT); - rocker_carrier_init(rocker_port); - - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; - - err = register_netdev(dev); - if (err) { - dev_err(&pdev->dev, "register_netdev failed\n"); - goto err_register_netdev; - } - rocker->ports[port_number] = rocker_port; - - switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - - rocker_port_set_learning(rocker_port, NULL); - - err = rocker_port_ig_tbl(rocker_port, NULL, 0); - if (err) { - netdev_err(rocker_port->dev, "install ig port table failed\n"); - goto err_port_ig_tbl; - } - - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) { - netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); - goto err_untagged_vlan; - } - - return 0; - -err_untagged_vlan: - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); -err_port_ig_tbl: - rocker->ports[port_number] = NULL; - unregister_netdev(dev); -err_register_netdev: - free_netdev(dev); - return err; -} - -static int rocker_probe_ports(struct rocker 
*rocker) -{ - int i; - size_t alloc_size; - int err; - - alloc_size = sizeof(struct rocker_port *) * rocker->port_count; - rocker->ports = kzalloc(alloc_size, GFP_KERNEL); - if (!rocker->ports) - return -ENOMEM; - for (i = 0; i < rocker->port_count; i++) { - err = rocker_probe_port(rocker, i); - if (err) - goto remove_ports; - } - return 0; - -remove_ports: - rocker_remove_ports(rocker); - return err; -} - -static int rocker_msix_init(struct rocker *rocker) -{ - struct pci_dev *pdev = rocker->pdev; - int msix_entries; - int i; - int err; - - msix_entries = pci_msix_vec_count(pdev); - if (msix_entries < 0) - return msix_entries; - - if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) - return -EINVAL; - - rocker->msix_entries = kmalloc_array(msix_entries, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!rocker->msix_entries) - return -ENOMEM; - - for (i = 0; i < msix_entries; i++) - rocker->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); - if (err < 0) - goto err_enable_msix; - - return 0; - -err_enable_msix: - kfree(rocker->msix_entries); - return err; -} - -static void rocker_msix_fini(const struct rocker *rocker) -{ - pci_disable_msix(rocker->pdev); - kfree(rocker->msix_entries); -} - -static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct rocker *rocker; - int err; - - rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); - if (!rocker) - return -ENOMEM; - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - goto err_pci_enable_device; - } - - err = pci_request_regions(pdev, rocker_driver_name); - if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - goto err_pci_request_regions; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } - - if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { - dev_err(&pdev->dev, "invalid PCI region size\n"); - err = -EINVAL; - goto err_pci_resource_len_check; - } - - rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!rocker->hw_addr) { - dev_err(&pdev->dev, "ioremap failed\n"); - err = -EIO; - goto err_ioremap; - } - pci_set_master(pdev); - - rocker->pdev = pdev; - pci_set_drvdata(pdev, rocker); - - rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); - - err = rocker_msix_init(rocker); - if (err) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - - err = rocker_basic_hw_test(rocker); - if (err) { - dev_err(&pdev->dev, "basic hw test failed\n"); - goto err_basic_hw_test; - } - - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - - err = rocker_dma_rings_init(rocker); - if (err) - goto err_dma_rings_init; - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), - rocker_cmd_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign cmd irq\n"); - goto err_request_cmd_irq; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), - rocker_event_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign event irq\n"); - goto err_request_event_irq; - } - - rocker->hw.id = 
rocker_read64(rocker, SWITCH_ID); - - err = rocker_init_tbls(rocker); - if (err) { - dev_err(&pdev->dev, "cannot init rocker tables\n"); - goto err_init_tbls; - } - - setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup, - (unsigned long) rocker); - mod_timer(&rocker->fdb_cleanup_timer, jiffies); - - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", - (int)sizeof(rocker->hw.id), &rocker->hw.id); - - return 0; - -err_probe_ports: - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); -err_init_tbls: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); -err_request_event_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); -err_request_cmd_irq: - rocker_dma_rings_fini(rocker); -err_dma_rings_init: -err_basic_hw_test: - rocker_msix_fini(rocker); -err_msix_init: - iounmap(rocker->hw_addr); -err_ioremap: -err_pci_resource_len_check: -err_pci_set_dma_mask: - pci_release_regions(pdev); -err_pci_request_regions: - pci_disable_device(pdev); -err_pci_enable_device: - kfree(rocker); - return err; -} - -static void rocker_remove(struct pci_dev *pdev) -{ - struct rocker *rocker = pci_get_drvdata(pdev); - - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - rocker_remove_ports(rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); - rocker_dma_rings_fini(rocker); - rocker_msix_fini(rocker); - iounmap(rocker->hw_addr); - pci_release_regions(rocker->pdev); - pci_disable_device(rocker->pdev); - kfree(rocker); -} - -static struct pci_driver rocker_pci_driver = { - .name = rocker_driver_name, - .id_table = rocker_pci_id_table, - .probe = rocker_probe, - .remove = rocker_remove, -}; - -/************************************ - * Net device notifier event handler - ************************************/ - -static bool rocker_port_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &rocker_port_netdev_ops; -} - -static int rocker_port_bridge_join(struct rocker_port *rocker_port, - struct net_device *bridge) -{ - u16 untagged_vid = 0; - int err; - - /* Port is joining bridge, so the internal VLAN for the - * port is going to change to the bridge internal VLAN. - * Let's remove untagged VLAN (vid=0) from port and - * re-add once internal VLAN has changed. 
- */ - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - - rocker_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); - - return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); -} - -static int rocker_port_bridge_leave(struct rocker_port *rocker_port) -{ - u16 untagged_vid = 0; - int err; - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, - rocker_port->dev->ifindex); - - switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, - false); - rocker_port->bridge_dev = NULL; - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) - return err; - - if (rocker_port->dev->flags & IFF_UP) - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - - -static int rocker_port_ovs_changed(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err; - - rocker_port->bridge_dev = master; - - err = rocker_port_fwd_disable(rocker_port, NULL, 0); - if (err) - return err; - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - -static int rocker_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err = 0; - - if (netif_is_bridge_master(master)) - err = rocker_port_bridge_join(rocker_port, master); - else if (netif_is_ovs_master(master)) - err = rocker_port_ovs_changed(rocker_port, master); - return err; -} - -static int rocker_port_master_unlinked(struct rocker_port *rocker_port) -{ - int err = 0; - - if (rocker_port_is_bridged(rocker_port)) - err = rocker_port_bridge_leave(rocker_port); - else if (rocker_port_is_ovsed(rocker_port)) - err = rocker_port_ovs_changed(rocker_port, NULL); - return err; -} - -static int rocker_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info; - struct rocker_port *rocker_port; - int err; - - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_CHANGEUPPER: - info = ptr; - if (!info->master) - goto out; - rocker_port = netdev_priv(dev); - if (info->linking) { - err = rocker_port_master_linked(rocker_port, - info->upper_dev); - if (err) - netdev_warn(dev, "failed to reflect master linked (err %d)\n", - err); - } else { - err = rocker_port_master_unlinked(rocker_port); - if (err) - netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", - err); - } - break; - } -out: - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netdevice_nb __read_mostly = { - .notifier_call = rocker_netdevice_event, -}; - -/************************************ - * Net event notifier event handler - ************************************/ - -static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int flags = (n->nud_state & NUD_VALID ? 
0 : ROCKER_OP_FLAG_REMOVE) | - ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); -} - -static int rocker_netevent_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev; - struct neighbour *n = ptr; - int err; - - switch (event) { - case NETEVENT_NEIGH_UPDATE: - if (n->tbl != &arp_tbl) - return NOTIFY_DONE; - dev = n->dev; - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - err = rocker_neigh_update(dev, n); - if (err) - netdev_warn(dev, - "failed to handle neigh update (err %d)\n", - err); - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netevent_nb __read_mostly = { - .notifier_call = rocker_netevent_event, -}; - -/*********************** - * Module init and exit - ***********************/ - -static int __init rocker_module_init(void) -{ - int err; - - register_netdevice_notifier(&rocker_netdevice_nb); - register_netevent_notifier(&rocker_netevent_nb); - err = pci_register_driver(&rocker_pci_driver); - if (err) - goto err_pci_register_driver; - return 0; - -err_pci_register_driver: - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - return err; -} - -static void __exit rocker_module_exit(void) -{ - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - pci_unregister_driver(&rocker_pci_driver); -} - -module_init(rocker_module_init); -module_exit(rocker_module_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); -MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); -MODULE_DESCRIPTION("Rocker switch device driver"); -MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 12490b2f6504..1ab995f7146b 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> * * This program is free software; you can redistribute it and/or modify @@ -12,456 +12,137 @@ #ifndef _ROCKER_H #define _ROCKER_H +#include <linux/kernel.h> #include <linux/types.h> - -/* Return codes */ -enum { - ROCKER_OK = 0, - ROCKER_ENOENT = 2, - ROCKER_ENXIO = 6, - ROCKER_ENOMEM = 12, - ROCKER_EEXIST = 17, - ROCKER_EINVAL = 22, - ROCKER_EMSGSIZE = 90, - ROCKER_ENOTSUP = 95, - ROCKER_ENOBUFS = 105, -}; - -#define ROCKER_FP_PORTS_MAX 62 - -#define PCI_VENDOR_ID_REDHAT 0x1b36 -#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 - -#define ROCKER_PCI_BAR0_SIZE 0x2000 - -/* MSI-X vectors */ -enum { - ROCKER_MSIX_VEC_CMD, - ROCKER_MSIX_VEC_EVENT, - ROCKER_MSIX_VEC_TEST, - ROCKER_MSIX_VEC_RESERVED0, - __ROCKER_MSIX_VEC_TX, - __ROCKER_MSIX_VEC_RX, -#define ROCKER_MSIX_VEC_TX(port) \ - (__ROCKER_MSIX_VEC_TX + ((port) * 2)) -#define ROCKER_MSIX_VEC_RX(port) \ - (__ROCKER_MSIX_VEC_RX + ((port) * 2)) -#define ROCKER_MSIX_VEC_COUNT(portcnt) \ - (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) -}; - -/* Rocker bogus registers */ -#define ROCKER_BOGUS_REG0 0x0000 -#define ROCKER_BOGUS_REG1 0x0004 -#define ROCKER_BOGUS_REG2 0x0008 -#define ROCKER_BOGUS_REG3 0x000c - -/* Rocker test registers */ -#define ROCKER_TEST_REG 0x0010 -#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ -#define 
ROCKER_TEST_IRQ 0x0020 -#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ -#define ROCKER_TEST_DMA_SIZE 0x0030 -#define ROCKER_TEST_DMA_CTRL 0x0034 - -/* Rocker test register ctrl */ -#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) -#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) -#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) - -/* Rocker DMA ring register offsets */ -#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ -#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) -#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) -#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) -#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) -#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) -#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) - -/* Rocker dma ctrl register bits */ -#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) - -/* Rocker DMA ring types */ -enum rocker_dma_type { - ROCKER_DMA_CMD, - ROCKER_DMA_EVENT, - __ROCKER_DMA_TX, - __ROCKER_DMA_RX, -#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) -#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) -}; - -/* Rocker DMA ring size limits and default sizes */ -#define ROCKER_DMA_SIZE_MIN 2ul -#define ROCKER_DMA_SIZE_MAX 65536ul -#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul -#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul -#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_TX_DESC_SIZE 256 -#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_RX_DESC_SIZE 256 - -/* Rocker DMA descriptor struct */ -struct rocker_desc { - u64 buf_addr; - u64 cookie; - u16 buf_size; - u16 tlv_size; - u16 resv[5]; - u16 comp_err; -}; - -#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15) - -/* Rocker DMA TLV struct */ -struct rocker_tlv { - u32 type; - u16 len; -}; - -/* TLVs */ -enum { - ROCKER_TLV_CMD_UNSPEC, - ROCKER_TLV_CMD_TYPE, /* u16 */ - ROCKER_TLV_CMD_INFO, /* nest */ - - __ROCKER_TLV_CMD_MAX, - ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_TYPE_UNSPEC, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, - - ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, - - __ROCKER_TLV_CMD_TYPE_MAX, - ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ - - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - ROCKER_TLV_CMD_PORT_SETTINGS_MAX = - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ - - ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ - - ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ - 
ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ - - __ROCKER_TLV_CMD_PORT_STATS_MAX, - ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, -}; - -enum rocker_port_mode { - ROCKER_PORT_MODE_OF_DPA, -}; - -enum { - ROCKER_TLV_EVENT_UNSPEC, - ROCKER_TLV_EVENT_TYPE, /* u16 */ - ROCKER_TLV_EVENT_INFO, /* nest */ - - __ROCKER_TLV_EVENT_MAX, - ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_TYPE_UNSPEC, - ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, - ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, - - __ROCKER_TLV_EVENT_TYPE_MAX, - ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ - ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ - - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, - ROCKER_TLV_EVENT_LINK_CHANGED_MAX = - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ - ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ - ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ - - __ROCKER_TLV_EVENT_MAC_VLAN_MAX, - ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, -}; - -enum { - ROCKER_TLV_RX_UNSPEC, - ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ - ROCKER_TLV_RX_CSUM, /* u16 */ - ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ - ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ - ROCKER_TLV_RX_FRAG_LEN, /* u16 */ - - __ROCKER_TLV_RX_MAX, - ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, -}; - -#define ROCKER_RX_FLAGS_IPV4 BIT(0) -#define ROCKER_RX_FLAGS_IPV6 BIT(1) -#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2) -#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) -#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) -#define ROCKER_RX_FLAGS_TCP BIT(5) -#define ROCKER_RX_FLAGS_UDP BIT(6) -#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) -#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) - -enum { - ROCKER_TLV_TX_UNSPEC, - ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ - ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ - ROCKER_TLV_TX_TSO_MSS, /* u16 */ - ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ - ROCKER_TLV_TX_FRAGS, /* array */ - - __ROCKER_TLV_TX_MAX, - ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, -}; - -#define ROCKER_TX_OFFLOAD_NONE 0 -#define ROCKER_TX_OFFLOAD_IP_CSUM 1 -#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 -#define ROCKER_TX_OFFLOAD_L3_CSUM 3 -#define ROCKER_TX_OFFLOAD_TSO 4 - -#define ROCKER_TX_FRAGS_MAX 16 - -enum { - ROCKER_TLV_TX_FRAG_UNSPEC, - ROCKER_TLV_TX_FRAG, /* nest */ - - __ROCKER_TLV_TX_FRAG_MAX, - ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, -}; - -enum { - ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, - ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ - ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ - - __ROCKER_TLV_TX_FRAG_ATTR_MAX, - ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, -}; - -/* cmd info nested for OF-DPA msgs */ -enum { - ROCKER_TLV_OF_DPA_UNSPEC, - ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ - ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ - ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ - ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ - ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ - ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ - 
ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ - ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ - ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ - ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ - ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ - ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ - ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ - ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ - ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ - - __ROCKER_TLV_OF_DPA_MAX, - ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, -}; - -/* OF-DPA table IDs */ - -enum rocker_of_dpa_table_id { - ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, - ROCKER_OF_DPA_TABLE_ID_VLAN = 10, - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, - ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, -}; - -/* OF-DPA flow stats */ -enum { - ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, - ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ - - __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, - ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, -}; - -/* OF-DPA group types */ -enum rocker_of_dpa_group_type { - ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, - ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, - ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, - ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, - ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, - ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, -}; - -/* OF-DPA group L2 overlay types */ -enum rocker_of_dpa_overlay_type { - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, -}; - -/* OF-DPA group 
ID encoding */ -#define ROCKER_GROUP_TYPE_SHIFT 28 -#define ROCKER_GROUP_TYPE_MASK 0xf0000000 -#define ROCKER_GROUP_VLAN_SHIFT 16 -#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 -#define ROCKER_GROUP_PORT_SHIFT 0 -#define ROCKER_GROUP_PORT_MASK 0x0000ffff -#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 -#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 -#define ROCKER_GROUP_SUBTYPE_SHIFT 10 -#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 -#define ROCKER_GROUP_INDEX_SHIFT 0 -#define ROCKER_GROUP_INDEX_MASK 0x0000ffff -#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 -#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff - -#define ROCKER_GROUP_TYPE_GET(group_id) \ - (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) -#define ROCKER_GROUP_TYPE_SET(type) \ - (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) -#define ROCKER_GROUP_VLAN_GET(group_id) \ - (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) -#define ROCKER_GROUP_VLAN_SET(vlan_id) \ - (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) -#define ROCKER_GROUP_PORT_GET(group_id) \ - (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) -#define ROCKER_GROUP_PORT_SET(port) \ - (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) -#define ROCKER_GROUP_INDEX_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) -#define ROCKER_GROUP_INDEX_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) -#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ - ROCKER_GROUP_INDEX_LONG_SHIFT) -#define ROCKER_GROUP_INDEX_LONG_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ - ROCKER_GROUP_INDEX_LONG_MASK) - -#define ROCKER_GROUP_NONE 0 -#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) -#define ROCKER_GROUP_L2_REWRITE(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) -#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L3_UNICAST(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) - -/* Rocker general purpose registers */ -#define ROCKER_CONTROL 0x0300 -#define ROCKER_PORT_PHYS_COUNT 0x0304 -#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ -#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ -#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ - -/* Rocker control bits */ -#define ROCKER_CONTROL_RESET BIT(0) +#include <linux/netdevice.h> +#include <net/neighbour.h> +#include <net/switchdev.h> + +#include "rocker_hw.h" + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + dma_addr_t mapaddr; +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +struct rocker_port { + struct net_device *dev; + struct rocker *rocker; + void *wpriv; + unsigned int port_number; + 
u32 pport; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker_world_ops; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; /* for cmd ring accesses */ + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + struct rocker_world_ops *wops; + void *wpriv; +}; + +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv); + +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv); + +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning); + +struct rocker_world_ops { + const char *kind; + size_t priv_size; + size_t port_priv_size; + u8 mode; + int (*init)(struct rocker *rocker); + void (*fini)(struct rocker *rocker); + int (*port_pre_init)(struct rocker_port *rocker_port); + int (*port_init)(struct rocker_port *rocker_port); + void (*port_fini)(struct rocker_port *rocker_port); + void (*port_post_fini)(struct rocker_port *rocker_port); + int (*port_open)(struct rocker_port *rocker_port); + void (*port_stop)(struct rocker_port *rocker_port); + int (*port_attr_stp_state_set)(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags); + int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans); + int (*port_obj_vlan_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); + int (*port_obj_vlan_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan); + int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb); + int (*port_obj_fib4_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans); + int (*port_obj_fib4_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4); + int (*port_obj_fdb_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); + int (*port_obj_fdb_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb); + int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb); + int (*port_master_linked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_master_unlinked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_neigh_update)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_neigh_destroy)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); +}; 
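The rocker_world_ops table added just above is the core of this refactor: the driver is split into a thin PCI/DMA core and a pluggable "world" (OF-DPA being the only one, exported below as rocker_ofdpa_ops), with the core forwarding port and switchdev operations through these hooks. A minimal standalone sketch of that delegation pattern follows; it is plain C with made-up names (world_ops, fake_world_ops, core_port_open), not the driver's actual code.

/* Standalone sketch of the "world ops" delegation the new rocker.h
 * introduces: the core keeps a pointer to an ops table and forwards
 * per-port operations to whichever world (here a fake one) is bound.
 * Names such as fake_world_ops and core_port_open are hypothetical.
 */
#include <stdio.h>

struct port;

struct world_ops {
	const char *kind;
	int (*port_open)(struct port *port);
	void (*port_stop)(struct port *port);
};

struct port {
	int number;
	const struct world_ops *wops;	/* bound once at probe time */
};

/* Core-side wrapper: delegate only if the world implements the hook. */
static int core_port_open(struct port *port)
{
	if (!port->wops->port_open)
		return 0;
	return port->wops->port_open(port);
}

static int fake_port_open(struct port *port)
{
	printf("world '%s': open port %d\n", port->wops->kind, port->number);
	return 0;
}

static const struct world_ops fake_world_ops = {
	.kind = "fake",
	.port_open = fake_port_open,
	/* .port_stop left NULL: the core must tolerate missing hooks */
};

int main(void)
{
	struct port p = { .number = 1, .wops = &fake_world_ops };

	return core_port_open(&p);
}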
+ +extern struct rocker_world_ops rocker_ofdpa_ops; #endif diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h new file mode 100644 index 000000000000..2adfe88859f2 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_hw.h @@ -0,0 +1,467 @@ +/* + * drivers/net/ethernet/rocker/rocker_hw.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ROCKER_HW_H +#define _ROCKER_HW_H + +#include <linux/types.h> + +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) +#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) +#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN 
BIT(15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_STATS_UNSPEC, + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ + + ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ + + ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ + + __ROCKER_TLV_CMD_PORT_STATS_MAX, + ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 BIT(0) +#define ROCKER_RX_FLAGS_IPV6 BIT(1) +#define 
ROCKER_RX_FLAGS_CSUM_CALC BIT(2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) +#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) +#define ROCKER_RX_FLAGS_TCP BIT(5) +#define ROCKER_RX_FLAGS_UDP BIT(6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* 
u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) 
<< ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET BIT(0) + +#endif diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c new file mode 100644 index 000000000000..28b775e5a9ad --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -0,0 +1,2909 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/sort.h> +#include <linux/random.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <linux/bitops.h> +#include <linux/ctype.h> +#include <net/switchdev.h> +#include <net/rtnetlink.h> +#include <net/netevent.h> +#include <net/arp.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <generated/utsrelease.h> + +#include "rocker_hw.h" +#include "rocker.h" +#include "rocker_tlv.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(void) +{ + struct rocker_wait *wait; + + wait = kzalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) + return NULL; + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *wait) +{ + kfree(wait); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, HZ / 10); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 
0; +} + +static int rocker_dma_test_one(const struct rocker *rocker, + struct rocker_wait *wait, u32 test_type, + dma_addr_t dma_handle, const unsigned char *buf, + const unsigned char *expect, size_t size) +{ + const struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(const struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(const struct rocker *rocker, + struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) 
+ dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(const struct rocker_desc_info *desc_info) +{ + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; +} + +static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false; +} + +static void * +rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) +{ + return (void *)(uintptr_t)desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (uintptr_t) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(const struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(const struct rocker *rocker, + struct rocker_dma_ring_info *info, + const struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(const struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; 
+ info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(const struct rocker *rocker, + const struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. + * That tells hw that the desc is ready to be used by it. + */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait; + + wait = rocker_wait_create(); + if (!wait) + return -ENOMEM; + rocker_desc_cookie_ptr_set(desc_info, wait); + return 0; +} + +static void +rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info); + + rocker_wait_destroy(wait); +} + +static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker) +{ + const struct 
rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + int err; + + for (i = 0; i < cmd_ring->size; i++) { + err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); + return err; +} + +static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker) +{ + const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + + for (i = 0; i < cmd_ring->size; i++) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_cmd_ring_waits_alloc(rocker); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring waits\n"); + goto err_dma_cmd_ring_waits_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_waits_alloc: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_cmd_ring_waits_free(rocker); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + 
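For reference, rocker_port_rx_buf_len() above sizes each RX buffer (used by the skb allocation that follows) as the port MTU plus one Ethernet header, the FCS, and one VLAN tag. A minimal stand-alone sketch of that arithmetic, assuming the usual kernel values ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4; the function name rx_buf_len is illustrative, not driver API:

#include <stdio.h>

/* Assumed constants; these match the usual <linux/if_ether.h>/<linux/if_vlan.h> values. */
#define ETH_HLEN    14
#define ETH_FCS_LEN  4
#define VLAN_HLEN    4

/* Illustrative mirror of rocker_port_rx_buf_len(): room for one full tagged frame. */
static unsigned int rx_buf_len(unsigned int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

int main(void)
{
	/* A 1500-byte MTU therefore needs 1522-byte RX buffers. */
	printf("rx buf len for MTU 1500: %u\n", rx_buf_len(1500));
	return 0;
}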
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, + const struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = 
rocker_dma_rx_ring_skbs_alloc(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); +err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(const struct rocker_port *rocker_port, + bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1ULL << rocker_port->pport; + else + val &= ~(1ULL << rocker_port->pport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(const struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(const struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(const struct rocker *rocker, + const struct rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); + +static int rocker_event_mac_vlan_seen(const 
struct rocker *rocker, + const struct rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + const unsigned char *addr; + __be16 vlan_id; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); +} + +static int rocker_event_process(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + const struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct pci_dev *pdev = rocker->pdev; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long lock_flags; + int err; + + spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); + + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + return -EAGAIN; + } + + wait = rocker_desc_cookie_ptr_get(desc_info); + rocker_wait_init(wait); + wait->nowait = nowait; + + err = prepare(rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, 
lock_flags); + return err; + } + + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +static int +rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + u8 *p_mode = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]; + if (!attr) + return -EIO; + + *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]); + return 0; +} + +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + const struct rocker_tlv *attr; + size_t i, j, len; + const char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + 
return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + bool learning = *(bool *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + learning)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_ethtool_proc, + ecmd); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr); +} + +static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, + u8 *p_mode) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_mode_proc, p_mode); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return 
rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_set_port_learning_prep, + &learning, NULL, NULL); +} + +/********************** + * Worlds manipulation + **********************/ + +static struct rocker_world_ops *rocker_world_ops[] = { + &rocker_ofdpa_ops, +}; + +#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops) + +static struct rocker_world_ops *rocker_world_ops_find(u8 mode) +{ + int i; + + for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++) + if (rocker_world_ops[i]->mode == mode) + return rocker_world_ops[i]; + return NULL; +} + +static int rocker_world_init(struct rocker *rocker, u8 mode) +{ + struct rocker_world_ops *wops; + int err; + + wops = rocker_world_ops_find(mode); + if (!wops) { + dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n", + mode); + return -EINVAL; + } + rocker->wops = wops; + rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL); + if (!rocker->wpriv) + return -ENOMEM; + if (!wops->init) + return 0; + err = wops->init(rocker); + if (err) + kfree(rocker->wpriv); + return err; +} + +static void rocker_world_fini(struct rocker *rocker) +{ + struct rocker_world_ops *wops = rocker->wops; + + if (!wops || !wops->fini) + return; + wops->fini(rocker); + kfree(rocker->wpriv); +} + +static int rocker_world_check_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u8 mode; + int err; + + err = rocker_cmd_get_port_settings_mode(rocker_port, &mode); + if (err) { + dev_err(&rocker->pdev->dev, "failed to get port mode\n"); + return err; + } + if (rocker->wops) { + if (rocker->wops->mode != mode) { + dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); + return err; + } + return 0; + } + return rocker_world_init(rocker, mode); +} + +static int rocker_world_port_pre_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + int err; + + rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL); + if (!rocker_port->wpriv) + return -ENOMEM; + if (!wops->port_pre_init) + return 0; + err = wops->port_pre_init(rocker_port); + if (err) + kfree(rocker_port->wpriv); + return 0; +} + +static int rocker_world_port_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_init) + return 0; + return wops->port_init(rocker_port); +} + +static void rocker_world_port_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_fini) + return; + wops->port_fini(rocker_port); +} + +static void rocker_world_port_post_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_post_fini) + return; + wops->port_post_fini(rocker_port); + kfree(rocker_port->wpriv); +} + +static int rocker_world_port_open(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_open) + return 0; + return wops->port_open(rocker_port); +} + +static void rocker_world_port_stop(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_stop) + return; + wops->port_stop(rocker_port); +} + +static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct 
switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_stp_state_set) + return -EOPNOTSUPP; + return wops->port_attr_stp_state_set(rocker_port, state, trans); +} + +static int +rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_set) + return -EOPNOTSUPP; + return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, + trans); +} + +static int +rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_get) + return -EOPNOTSUPP; + return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); +} + +static int +rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) + +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_ageing_time_set) + return -EOPNOTSUPP; + return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time, + trans); +} + +static int +rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_add) + return -EOPNOTSUPP; + return wops->port_obj_vlan_add(rocker_port, vlan, trans); +} + +static int +rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_del) + return -EOPNOTSUPP; + return wops->port_obj_vlan_del(rocker_port, vlan); +} + +static int +rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_dump) + return -EOPNOTSUPP; + return wops->port_obj_vlan_dump(rocker_port, vlan, cb); +} + +static int +rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_add) + return -EOPNOTSUPP; + return wops->port_obj_fib4_add(rocker_port, fib4, trans); +} + +static int +rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_del) + return -EOPNOTSUPP; + return wops->port_obj_fib4_del(rocker_port, fib4); +} + +static int +rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_add) + return -EOPNOTSUPP; + return wops->port_obj_fdb_add(rocker_port, fdb, trans); +} + +static int +rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_del) + return -EOPNOTSUPP; + return wops->port_obj_fdb_del(rocker_port, fdb); +} + +static int 
+rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_dump) + return -EOPNOTSUPP; + return wops->port_obj_fdb_dump(rocker_port, fdb, cb); +} + +static int rocker_world_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_linked) + return -EOPNOTSUPP; + return wops->port_master_linked(rocker_port, master); +} + +static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_unlinked) + return -EOPNOTSUPP; + return wops->port_master_unlinked(rocker_port, master); +} + +static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_update) + return -EOPNOTSUPP; + return wops->port_neigh_update(rocker_port, n); +} + +static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_destroy) + return -EOPNOTSUPP; + return wops->port_neigh_destroy(rocker_port, n); +} + +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_ev_mac_vlan_seen) + return -EOPNOTSUPP; + return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_world_port_open(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot open port in world\n"); + goto err_world_port_open; + } + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_world_port_open: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_world_port_stop(rocker_port); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} 
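The TX path below (rocker_port_xmit) relies on the descriptor-ring convention established earlier: rocker_desc_head_get() returns NULL, and the driver stops the queue, when advancing head would collide with tail, so one slot is always left unused to tell a full ring apart from an empty one (rocker_desc_tail_get() treats head == tail as empty). A minimal user-space sketch of that convention, assuming a toy ring size; pos_inc/ring_full/ring_empty/RING_SIZE are illustrative names, not driver API:

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8	/* illustrative; real rings use ROCKER_DMA_*_DEFAULT_SIZE */

static unsigned int pos_inc(unsigned int pos)
{
	return ++pos == RING_SIZE ? 0 : pos;	/* same wrap rule as __pos_inc() */
}

/* Full when advancing head would land on tail: usable capacity is RING_SIZE - 1. */
static bool ring_full(unsigned int head, unsigned int tail)
{
	return pos_inc(head) == tail;
}

static bool ring_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

int main(void)
{
	unsigned int head = 0, tail = 0, queued = 0;

	/* Producer side, mirroring rocker_desc_head_get(): stop before head meets tail. */
	while (!ring_full(head, tail)) {
		head = pos_inc(head);
		queued++;
	}
	/* Only RING_SIZE - 1 descriptors fit before the queue must stop. */
	printf("queued %u of %u slots, empty=%d\n", queued, RING_SIZE,
	       ring_empty(head, tail));
	return 0;
}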
+ +static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { + err = skb_linearize(skb); + if (err) + goto unmap_frags; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); 
+nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct port_name name = { .buf = buf, .len = len }; + int err; + + err = rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name); + + return err ? -EOPNOTSUPP : 0; +} + +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + +static void rocker_port_neigh_destroy(struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(n->dev); + int err; + + err = rocker_world_port_neigh_destroy(rocker_port, n); + if (err) + netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", + err); +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, + .ndo_neigh_destroy = rocker_port_neigh_destroy, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + const struct rocker *rocker = rocker_port->rocker; + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(rocker->hw.id); + memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = rocker_world_port_attr_bridge_flags_get(rocker_port, + &attr->u.brport_flags); + break; + default: + return -EOPNOTSUPP; + } + + 
return err; +} + +static int rocker_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = rocker_world_port_attr_stp_state_set(rocker_port, + attr->u.stp_state, + trans); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = rocker_world_port_attr_bridge_flags_set(rocker_port, + attr->u.brport_flags, + trans); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, + attr->u.ageing_time, + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_add(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = rocker_world_port_obj_fib4_add(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_add(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_del(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = rocker_world_port_obj_fib4_del(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_del(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_world_port_obj_fdb_dump(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + cb); + break; + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_world_port_obj_vlan_dump(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + cb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static const struct switchdev_ops rocker_port_switchdev_ops = { + .switchdev_port_attr_get = rocker_port_attr_get, + .switchdev_port_attr_set = rocker_port_attr_set, + .switchdev_port_obj_add = rocker_port_obj_add, + .switchdev_port_obj_del = rocker_port_obj_del, + .switchdev_port_obj_dump = rocker_port_obj_dump, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) 
+{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static struct rocker_port_stats { + char str[ETH_GSTRING_LEN]; + int type; +} rocker_port_stats[] = { + { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, + { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, + { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, + { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, + + { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, + { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, + { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, + { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, +}; + +#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) + +static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int +rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_stats; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) + return -EMSGSIZE; + + cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_stats) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, cmd_stats); + + return 0; +} + +static int +rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; + const struct rocker_tlv *pattr; + u32 pport; + u64 *data = priv; + int i; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) + return -EIO; + + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + pattr = stats_attrs[rocker_port_stats[i].type]; + if (!pattr) + continue; + + data[i] = rocker_tlv_get_u64(pattr); + } + + return 0; +} + +static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, + void *priv) +{ + return rocker_cmd_exec(rocker_port, false, + rocker_cmd_get_port_stats_prep, NULL, + rocker_cmd_get_port_stats_ethtool_proc, + priv); +} + +static void rocker_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { + int i; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) + data[i] = 0; + } +} + +static int rocker_port_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ROCKER_PORT_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + 
.get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = rocker_port_get_strings, + .get_ethtool_stats = rocker_port_get_stats, + .get_sset_count = rocker_port_get_sset_count, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + struct sk_buff *skb; + + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + + skb = rocker_desc_cookie_ptr_get(desc_info); + if (err == 0) { + rocker_port->dev->stats.tx_packets++; + rocker_port->dev->stats.tx_bytes += skb->len; + } else { + rocker_port->dev->stats.tx_errors++; + } + + dev_kfree_skb_any(skb); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(const struct rocker *rocker, + const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + u16 rx_flags = 0; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + + rocker_port->dev->stats.rx_packets++; + rocker_port->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + if (err) + rocker_port->dev->stats.rx_errors++; + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if 
(credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(const struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->pport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + if (!rocker_port) + continue; + rocker_world_port_fini(rocker_port); + unregister_netdev(rocker_port->dev); + rocker_world_port_post_fini(rocker_port); + free_netdev(rocker_port->dev); + } + rocker_world_fini(rocker); + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->pport = port_number + 1; + + err = rocker_world_check_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "world init failed\n"); + goto err_world_check_init; + } + + rocker_port_dev_addr_init(rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->switchdev_ops = &rocker_port_switchdev_ops; + netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; + + err = rocker_world_port_pre_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world pre-init failed\n"); + goto err_world_port_pre_init; + } + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + err = rocker_world_port_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world init failed\n"); + goto err_world_port_init; + } + + return 0; + +err_world_port_init: + rocker->ports[port_number] = NULL; + unregister_netdev(dev); +err_register_netdev: + rocker_world_port_post_fini(rocker_port); +err_world_port_pre_init: +err_world_check_init: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto 
remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(const struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %*phN\n", + (int)sizeof(rocker->hw.id), &rocker->hw.id); + + return 0; + +err_probe_ports: + 
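+	/* Error unwind: release resources in the reverse order they were set up. */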
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct rocker_port *rocker_port; + int err; + + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + if (!info->master) + goto out; + rocker_port = netdev_priv(dev); + if (info->linking) { + err = rocker_world_port_master_linked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master linked (err %d)\n", + err); + } else { + err = rocker_world_port_master_unlinked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", + err); + } + } +out: + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct rocker_port *rocker_port; + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + rocker_port = netdev_priv(dev); + err = rocker_world_port_neigh_update(rocker_port, n); + if (err) + netdev_warn(dev, "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); + err = pci_register_driver(&rocker_pci_driver); + if (err) + 
goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c new file mode 100644 index 000000000000..0e758bcb26b0 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -0,0 +1,2958 @@ +/* + * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like + * implementation + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/hashtable.h> +#include <linux/crc32.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <net/neighbour.h> +#include <net/switchdev.h> +#include <net/ip_fib.h> +#include <net/arp.h> + +#include "rocker.h" +#include "rocker_tlv.h" + +struct ofdpa_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_pport; + u32 in_pport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_pport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_pport; + u32 in_pport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_pport; + u32 in_pport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct ofdpa_flow_tbl_entry { + struct hlist_node entry; + u32 cmd; + u64 cookie; + struct ofdpa_flow_tbl_key key; + size_t key_len; + u32 key_crc32; /* key */ +}; + +struct ofdpa_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } 
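+		/* rewrites src/dst MAC and VLAN, then chains to a lower group */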
l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct ofdpa_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + unsigned long touched; + struct ofdpa_fdb_tbl_key { + struct ofdpa_port *ofdpa_port; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct ofdpa_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct ofdpa_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + +enum { + OFDPA_CTRL_LINK_LOCAL_MCAST, + OFDPA_CTRL_LOCAL_ARP, + OFDPA_CTRL_IPV4_MCAST, + OFDPA_CTRL_IPV6_MCAST, + OFDPA_CTRL_DFLT_BRIDGING, + OFDPA_CTRL_DFLT_OVS, + OFDPA_CTRL_MAX, +}; + +#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00 +#define OFDPA_N_INTERNAL_VLANS 255 +#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS) +#define OFDPA_UNTAGGED_VID 0 + +struct ofdpa { + struct rocker *rocker; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; /* for flow tbl accesses */ + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; /* for group tbl accesses */ + struct timer_list fdb_cleanup_timer; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ + unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ + u32 neigh_tbl_next_index; + unsigned long ageing_time; +}; + +struct ofdpa_port { + struct ofdpa *ofdpa; + struct rocker_port *rocker_port; + struct net_device *dev; + u32 pport; + struct net_device *bridge_dev; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + unsigned long ageing_time; + bool ctrls[OFDPA_CTRL_MAX]; + unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN]; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
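+ * For example, an exact-match bridging entry for a known FDB address
+ * (OFDPA_PRIORITY_BRIDGING_VLAN) overrides the wildcarded per-VLAN
+ * default flood entry (OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD).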
+ */ + +enum { + OFDPA_PRIORITY_UNKNOWN = 0, + OFDPA_PRIORITY_IG_PORT = 1, + OFDPA_PRIORITY_VLAN = 1, + OFDPA_PRIORITY_TERM_MAC_UCAST = 0, + OFDPA_PRIORITY_TERM_MAC_MCAST = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_VLAN = 3, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_TENANT = 3, + OFDPA_PRIORITY_ACL_CTRL = 3, + OFDPA_PRIORITY_ACL_NORMAL = 2, + OFDPA_PRIORITY_ACL_DFLT = 1, +}; + +static bool ofdpa_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = OFDPA_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = ofdpa_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port, + __be16 vlan_id) +{ + if (ofdpa_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port, + const char *kind) +{ + return ofdpa_port->bridge_dev && + !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind); +} + +static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "bridge"); +} + +static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "openvswitch"); +} + +#define OFDPA_OP_FLAG_REMOVE BIT(0) +#define OFDPA_OP_FLAG_NOWAIT BIT(1) +#define OFDPA_OP_FLAG_LEARNED BIT(2) +#define OFDPA_OP_FLAG_REFRESH BIT(3) + +static bool ofdpa_flags_nowait(int flags) +{ + return flags & OFDPA_OP_FLAG_NOWAIT; +} + +static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + struct switchdev_trans_item *elem = NULL; + gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ? + GFP_ATOMIC : GFP_KERNEL; + + /* If in transaction prepare phase, allocate the memory + * and enqueue it on a transaction. If in transaction + * commit phase, dequeue the memory from the transaction + * rather than re-allocating the memory. The idea is the + * driver code paths for prepare and commit are identical + * so the memory allocated in the prepare phase is the + * memory used in the commit phase. + */ + + if (!trans) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + } else if (switchdev_trans_ph_prepare(trans)) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + if (!elem) + return NULL; + switchdev_trans_item_enqueue(trans, elem, kfree, elem); + } else { + elem = switchdev_trans_item_dequeue(trans); + } + + return elem ? elem + 1 : NULL; +} + +static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, size); +} + +static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags, + size_t n, size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, n * size); +} + +static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem) +{ + struct switchdev_trans_item *elem; + + /* Frees are ignored if in transaction prepare phase. The + * memory remains on the per-port list until freed in the + * commit phase. 
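+	 * This mirrors __ofdpa_mem_alloc(): an allocation made during the
+	 * prepare phase is dequeued and reused during commit, so only a
+	 * commit-phase free actually releases the memory.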
+ */ + + if (switchdev_trans_ph_prepare(trans)) + return; + + elem = (struct switchdev_trans_item *) mem - 1; + kfree(elem); +} + +/************************************************************* + * Flow, group, FDB, internal VLAN and neigh command prepares + *************************************************************/ + +static int +ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port, + struct 
rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + 
return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ + +static struct ofdpa_flow_tbl_entry * +ofdpa_flow_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); + + hash_for_each_possible(ofdpa->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, key_len) == 0) + return found; + } + + return NULL; +} + +static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + match->cookie = found->cookie; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_kfree(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; + } else { + found = match; + found->cookie = ofdpa->flow_tbl_next_cookie++; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32); + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; + } + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + ofdpa_kfree(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_del, + found, NULL, NULL); + ofdpa_kfree(trans, found); + } + + return err; +} + +static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, __be16 vlan_id, + __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, 
__be16 new_vlan_id) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_pport = in_pport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u32 in_pport, u32 in_pport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct ofdpa_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (!ether_addr_equal(eth_dst_mask, ff_mac)) + wild = true; + } + + priority = OFDPA_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return 
ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct ofdpa_flow_tbl_key, + ucast_routing.group_id); + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, __be16 vlan_id, + __be16 vlan_id_mask, u8 ip_proto, + u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + priority = OFDPA_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (ether_addr_equal(eth_dst_mask, mcast_mac)) + priority = OFDPA_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = OFDPA_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_group_tbl_entry * +ofdpa_group_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa_group_tbl_entry *found; + + hash_for_each_possible(ofdpa->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans, + struct ofdpa_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + ofdpa_kfree(trans, entry->group_ids); + break; + default: + break; + } + ofdpa_kfree(trans, entry); +} + +static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + 
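+	/* An existing entry for this group ID is replaced (GROUP_MOD);
+	 * otherwise a new entry is added (GROUP_ADD).
+	 */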
found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_group_tbl_entry_free(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + int err = 0; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + ofdpa_group_tbl_entry_free(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_del, + found, NULL, NULL); + ofdpa_group_tbl_entry_free(trans, found); + } + + return err; +} + +static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u32 out_pport, + int pop_vlan) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + entry->l2_interface.pop_vlan = pop_vlan; + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = ofdpa_kcalloc(trans, flags, + group_count, sizeof(u32)); + if (!entry->group_ids) { + ofdpa_kfree(trans, entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags, + group_count, group_ids, + group_id); +} + +static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 index, const u8 *src_mac, const u8 *dst_mac, + __be16 vlan_id, bool ttl_check, u32 pport) +{ + struct ofdpa_group_tbl_entry 
*entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_neigh_tbl_entry * +ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr) +{ + struct ofdpa_neigh_tbl_entry *found; + + hash_for_each_possible(ofdpa->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void ofdpa_neigh_add(struct ofdpa *ofdpa, + struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (!switchdev_trans_ph_commit(trans)) + entry->index = ofdpa->neigh_tbl_next_index++; + if (switchdev_trans_ph_prepare(trans)) + return; + entry->ref_count++; + hash_add(ofdpa->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void ofdpa_neigh_del(struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + ofdpa_kfree(trans, entry); + } +} + +static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry, + struct switchdev_trans *trans, + const u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else if (!switchdev_trans_ph_prepare(trans)) { + entry->ref_count++; + } +} + +static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be32 ip_addr, const u8 *eth_dst) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_neigh_tbl_entry *entry; + struct ofdpa_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); + + found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = ofdpa_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + ofdpa_neigh_add(ofdpa, trans, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + ofdpa_neigh_del(trans, found); + } else if (updating) { + ofdpa_neigh_update(found, trans, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. 
+ */ + + err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags, + entry->index, + ofdpa_port->dev->dev_addr, + entry->eth_dst, + ofdpa_port->internal_vlan_id, + entry->ttl_check, + ofdpa_port->pport); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n", + err, entry->index); + goto err_out; + } + + if (adding || removing) { + group_id = ROCKER_GROUP_L3_UNICAST(entry->index); + err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, + eth_type, ip_addr, + inet_make_mask(32), + priority, goto_tbl, + group_id, flags); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n", + err, &entry->ip_addr, group_id); + } + +err_out: + if (!adding) + ofdpa_kfree(trans, entry); + + return err; +} + +static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + __be32 ip_addr) +{ + struct net_device *dev = ofdpa_port->dev; + struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); + int err = 0; + + if (!n) { + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (IS_ERR(n)) + return PTR_ERR(n); + } + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. + */ + + if (n->nud_state & NUD_VALID) + err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0, + ip_addr, n->ha); + else + neigh_event_send(n, NULL); + + neigh_release(n); + return err; +} + +static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be32 ip_addr, u32 *index) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_neigh_tbl_entry *entry; + struct ofdpa_neigh_tbl_entry *found; + unsigned long lock_flags; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + bool updating; + bool removing; + bool resolved = true; + int err = 0; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); + + found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); + if (found) + *index = found->index; + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = ofdpa_port->dev; + ofdpa_neigh_add(ofdpa, trans, entry); + *index = entry->index; + resolved = false; + } else if (removing) { + ofdpa_neigh_del(trans, found); + } else if (updating) { + ofdpa_neigh_update(found, trans, NULL, false); + resolved = !is_zero_ether_addr(found->eth_dst); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); + + if (!adding) + ofdpa_kfree(trans, entry); + + if (err) + return err; + + /* Resolved means neigh ip_addr is resolved to neigh mac. */ + + if (!resolved) + err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr); + + return err; +} + +static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa, + int port_index) +{ + struct rocker_port *rocker_port; + + rocker_port = ofdpa->rocker->ports[port_index]; + return rocker_port ? 
rocker_port->wpriv : NULL; +} + +static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be16 vlan_id) +{ + struct ofdpa_port *p; + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 *group_ids; + u8 group_count = 0; + int err = 0; + int i; + + group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32)); + if (!group_ids) + return -ENOMEM; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (!p) + continue; + if (!ofdpa_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + goto no_ports_in_vlan; + + err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id, + group_count, group_ids, group_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + +no_ports_in_vlan: + ofdpa_kfree(trans, group_ids); + return err; +} + +static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, bool pop_vlan) +{ + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + struct ofdpa_port *p; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + u32 out_pport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) { + out_pport = ofdpa_port->pport; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. 
+ */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_pport = 0; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct ofdpa_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} ofdpa_ctrls[] = { + [OFDPA_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [OFDPA_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [OFDPA_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, +}; + +static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport = ofdpa_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; + const u8 *eth_src = NULL; + const u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + int err; + + err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, + const struct ofdpa_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + 
ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags, + ctrl, vlan_id); + + if (ctrl->term) + return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (ofdpa_port->ctrls[i]) { + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + &ofdpa_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_pport = ofdpa_port->pport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged); + + if (adding && + test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && + !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already removed */ + + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + if (adding) { + err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err); + goto err_out; + } + } + + err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err); + goto err_out; + } + + err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + goto err_out; + } + + err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags, + in_pport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err); + +err_out: + if (switchdev_trans_ph_prepare(trans)) + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + return err; +} + +static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_pport; + u32 in_pport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. 
+ */ + + in_pport = 0; + in_pport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + goto_tbl); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct ofdpa_fdb_learn_work { + struct work_struct work; + struct ofdpa_port *ofdpa_port; + struct switchdev_trans *trans; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void ofdpa_port_fdb_learn_work(struct work_struct *work) +{ + const struct ofdpa_fdb_learn_work *lw = + container_of(work, struct ofdpa_fdb_learn_work, work); + bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE); + bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED); + struct switchdev_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; + + rtnl_lock(); + if (learned && removing) + call_switchdev_notifiers(SWITCHDEV_FDB_DEL, + lw->ofdpa_port->dev, &info.info); + else if (learned && !removing) + call_switchdev_notifiers(SWITCHDEV_FDB_ADD, + lw->ofdpa_port->dev, &info.info); + rtnl_unlock(); + + ofdpa_kfree(lw->trans, work); +} + +static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *addr, __be16 vlan_id) +{ + struct ofdpa_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_pport = ofdpa_port->pport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (ofdpa_port_is_bridged(ofdpa_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + + if (!(flags & OFDPA_OP_FLAG_REFRESH)) { + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr, + NULL, vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + lw = ofdpa_kzalloc(trans, flags, sizeof(*lw)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work); + + lw->ofdpa_port = ofdpa_port; + lw->trans = trans; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_kfree(trans, lw); + else + schedule_work(&lw->work); + + return 0; +} + +static struct ofdpa_fdb_tbl_entry * +ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_fdb_tbl_entry *match) +{ + struct ofdpa_fdb_tbl_entry *found; + + hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *fdb; + struct ofdpa_fdb_tbl_entry *found; + bool removing = (flags & OFDPA_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED); + fdb->touched = jiffies; + fdb->key.ofdpa_port = ofdpa_port; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + found = ofdpa_fdb_tbl_find(ofdpa, fdb); + + if (found) { + 
found->touched = jiffies; + if (removing) { + ofdpa_kfree(trans, fdb); + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + } else if (!removing) { + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->fdb_tbl, &fdb->entry, + fdb->key_crc32); + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + ofdpa_kfree(trans, fdb); + if (!found && removing) + return 0; + /* Refreshing existing to update aging timers */ + flags |= OFDPA_OP_FLAG_REFRESH; + } + + return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id); +} + +static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + unsigned long lock_flags; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) + return 0; + + flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + if (!found->learned) + continue; + err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static void ofdpa_fdb_cleanup(unsigned long data) +{ + struct ofdpa *ofdpa = (struct ofdpa *)data; + struct ofdpa_port *ofdpa_port; + struct ofdpa_fdb_tbl_entry *entry; + struct hlist_node *tmp; + unsigned long next_timer = jiffies + ofdpa->ageing_time; + unsigned long expires; + unsigned long lock_flags; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | + OFDPA_OP_FLAG_LEARNED; + int bkt; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) { + if (!entry->learned) + continue; + ofdpa_port = entry->key.ofdpa_port; + expires = entry->touched + ofdpa_port->ageing_time; + if (time_before_eq(expires, jiffies)) { + ofdpa_port_fdb_learn(ofdpa_port, NULL, + flags, entry->key.addr, + entry->key.vlan_id); + hash_del(&entry->entry); + } else if (time_before(expires, next_timer)) { + next_timer = expires; + } + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer)); +} + +static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int ofdpa_port_fwding(struct ofdpa_port 
*ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + bool pop_vlan; + u32 out_pport; + __be16 vlan_id; + u16 vid; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. + */ + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + flags |= OFDPA_OP_FLAG_REMOVE; + + out_pport = ofdpa_port->pport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = ofdpa_vlan_id_is_internal(vlan_id); + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + return 0; +} + +static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 state) +{ + bool want[OFDPA_CTRL_MAX] = { 0, }; + bool prev_ctrls[OFDPA_CTRL_MAX]; + u8 uninitialized_var(prev_state); + int err; + int i; + + if (switchdev_trans_ph_prepare(trans)) { + memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); + prev_state = ofdpa_port->stp_state; + } + + if (ofdpa_port->stp_state == state) + return 0; + + ofdpa_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + if (!ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + want[OFDPA_CTRL_IPV4_MCAST] = true; + want[OFDPA_CTRL_IPV6_MCAST] = true; + if (ofdpa_port_is_bridged(ofdpa_port)) + want[OFDPA_CTRL_DFLT_BRIDGING] = true; + else if (ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_DFLT_OVS] = true; + else + want[OFDPA_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (want[i] != ofdpa_port->ctrls[i]) { + int ctrl_flags = flags | + (want[i] ? 
0 : OFDPA_OP_FLAG_REMOVE); + err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags, + &ofdpa_ctrls[i]); + if (err) + goto err_out; + ofdpa_port->ctrls[i] = want[i]; + } + } + + err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags); + if (err) + goto err_out; + + err = ofdpa_port_fwding(ofdpa_port, trans, flags); + +err_out: + if (switchdev_trans_ph_prepare(trans)) { + memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); + ofdpa_port->stp_state = prev_state; + } + + return err; +} + +static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_FORWARDING); +} + +static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_DISABLED); +} + +static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u16 vid, u16 flags) +{ + int err; + + /* XXX deal with flags for PVID and untagged */ + + err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid); + if (err) + return err; + + err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid)); + if (err) + ofdpa_port_vlan(ofdpa_port, trans, + OFDPA_OP_FLAG_REMOVE, vid); + + return err; +} + +static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port, + u16 vid, u16 flags) +{ + int err; + + err = ofdpa_port_router_mac(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, htons(vid)); + if (err) + return err; + + return ofdpa_port_vlan(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, vid); +} + +static struct ofdpa_internal_vlan_tbl_entry * +ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex) +{ + struct ofdpa_internal_vlan_tbl_entry *found; + + hash_for_each_possible(ofdpa->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *entry; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, __be32 dst, + int dst_len, const struct fib_info *fi, + u32 tb_id, int flags) +{ + const struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = ofdpa_port->internal_vlan_id; + u32 
priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == ofdpa_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + +static void +ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (!found) { + netdev_err(ofdpa_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, ofdpa->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); +} + +/********************************** + * Rocker world ops implementation + **********************************/ + +static int ofdpa_init(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + ofdpa->rocker = rocker; + + hash_init(ofdpa->flow_tbl); + spin_lock_init(&ofdpa->flow_tbl_lock); + + hash_init(ofdpa->group_tbl); + spin_lock_init(&ofdpa->group_tbl_lock); + + hash_init(ofdpa->fdb_tbl); + spin_lock_init(&ofdpa->fdb_tbl_lock); + + hash_init(ofdpa->internal_vlan_tbl); + spin_lock_init(&ofdpa->internal_vlan_tbl_lock); + + hash_init(ofdpa->neigh_tbl); + spin_lock_init(&ofdpa->neigh_tbl_lock); + + setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, + (unsigned long) ofdpa); + mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); + + ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME; + + return 0; +} + +static void ofdpa_fini(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + unsigned long flags; + struct ofdpa_flow_tbl_entry *flow_entry; + struct ofdpa_group_tbl_entry *group_entry; + struct ofdpa_fdb_tbl_entry *fdb_entry; + struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry; + struct ofdpa_neigh_tbl_entry *neigh_entry; + struct hlist_node *tmp; + int bkt; + + del_timer_sync(&ofdpa->fdb_cleanup_timer); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); + hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->group_tbl_lock, flags); + hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags); + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags); + 
hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt, + tmp, internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags); + hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags); +} + +static int ofdpa_port_pre_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port->ofdpa = rocker_port->rocker->wpriv; + ofdpa_port->rocker_port = rocker_port; + ofdpa_port->dev = rocker_port->dev; + ofdpa_port->pport = rocker_port->pport; + ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME; + return 0; +} + +static int ofdpa_port_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err; + + switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install ig port table failed\n"); + return err; + } + + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + return 0; + +err_untagged_vlan: + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); + return err; +} + +static void ofdpa_port_fini(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_open(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fwd_enable(ofdpa_port, 0); +} + +static void ofdpa_port_stop(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT); +} + +static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_stp_update(ofdpa_port, trans, 0, state); +} + +static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + unsigned long orig_flags; + int err = 0; + + orig_flags = ofdpa_port->brport_flags; + ofdpa_port->brport_flags = brport_flags; + if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING && + !switchdev_trans_ph_prepare(trans)) + err = rocker_port_set_learning(ofdpa_port->rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_port->brport_flags = orig_flags; + + return err; +} + +static int +ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + *p_brport_flags = ofdpa_port->brport_flags; + return 0; +} + +static int +ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + struct ofdpa *ofdpa = 
ofdpa_port->ofdpa; + + if (!switchdev_trans_ph_prepare(trans)) { + ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time); + if (ofdpa_port->ageing_time < ofdpa->ageing_time) + ofdpa->ageing_time = ofdpa_port->ageing_time; + mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); + } + + return 0; +} + +static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan->flags = 0; + if (ofdpa_vlan_id_is_internal(htons(vid))) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, trans, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, 0); +} + +static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, NULL, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, + OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0); +} + +static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + int flags = OFDPA_OP_FLAG_REMOVE; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags); +} + +static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + struct hlist_node *tmp; + unsigned long lock_flags; + int bkt; + int err = 0; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + hash_for_each_safe(ofdpa->fdb_tbl, 
bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + ether_addr_copy(fdb->addr, found->key.addr); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port, + found->key.vlan_id); + err = cb(&fdb->obj); + if (err) + break; + } + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, + struct net_device *bridge) +{ + int err; + + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. + */ + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); + + ofdpa_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true); + + return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); +} + +static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) +{ + int err; + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->bridge_dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev, + false); + ofdpa_port->bridge_dev = NULL; + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + if (ofdpa_port->dev->flags & IFF_UP) + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, + struct net_device *master) +{ + int err; + + ofdpa_port->bridge_dev = master; + + err = ofdpa_port_fwd_disable(ofdpa_port, 0); + if (err) + return err; + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (netif_is_bridge_master(master)) + err = ofdpa_port_bridge_join(ofdpa_port, master); + else if (netif_is_ovs_master(master)) + err = ofdpa_port_ovs_changed(ofdpa_port, master); + return err; +} + +static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (ofdpa_port_is_bridged(ofdpa_port)) + err = ofdpa_port_bridge_leave(ofdpa_port); + else if (ofdpa_port_is_ovsed(ofdpa_port)) + err = ofdpa_port_ovs_changed(ofdpa_port, NULL); + return err; +} + +static int ofdpa_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = (n->nud_state & NUD_VALID ? 
0 : OFDPA_OP_FLAG_REMOVE) | + OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED; + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags); +} + +struct rocker_world_ops rocker_ofdpa_ops = { + .kind = "ofdpa", + .priv_size = sizeof(struct ofdpa), + .port_priv_size = sizeof(struct ofdpa_port), + .mode = ROCKER_PORT_MODE_OF_DPA, + .init = ofdpa_init, + .fini = ofdpa_fini, + .port_pre_init = ofdpa_port_pre_init, + .port_init = ofdpa_port_init, + .port_fini = ofdpa_port_fini, + .port_open = ofdpa_port_open, + .port_stop = ofdpa_port_stop, + .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, + .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, + .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, + .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set, + .port_obj_vlan_add = ofdpa_port_obj_vlan_add, + .port_obj_vlan_del = ofdpa_port_obj_vlan_del, + .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump, + .port_obj_fib4_add = ofdpa_port_obj_fib4_add, + .port_obj_fib4_del = ofdpa_port_obj_fib4_del, + .port_obj_fdb_add = ofdpa_port_obj_fdb_add, + .port_obj_fdb_del = ofdpa_port_obj_fdb_del, + .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump, + .port_master_linked = ofdpa_port_master_linked, + .port_master_unlinked = ofdpa_port_master_unlinked, + .port_neigh_update = ofdpa_port_neigh_update, + .port_neigh_destroy = ofdpa_port_neigh_destroy, + .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen, +}; diff --git a/drivers/net/ethernet/rocker/rocker_tlv.c b/drivers/net/ethernet/rocker/rocker_tlv.c new file mode 100644 index 000000000000..8185118f3492 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.c @@ -0,0 +1,53 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> + +#include "rocker_hw.h" +#include "rocker_tlv.h" + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = tlv; + } +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h new file mode 100644 index 000000000000..a63ef82e7c72 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.h @@ -0,0 +1,201 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ROCKER_TLV_H +#define _ROCKER_TLV_H + +#include <linux/types.h> + +#include "rocker_hw.h" +#include "rocker.h" + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static inline struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static inline int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static inline int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static inline int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static inline int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static inline int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static inline void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static inline int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static inline u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static inline u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static inline __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static inline u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static inline u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len); + +static inline void rocker_tlv_parse_nested(const struct rocker_tlv **tb, + int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static inline void +rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, + const struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static inline struct rocker_tlv * +rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data); + +static inline int rocker_tlv_put_u8(struct 
rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static inline struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static inline void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static inline void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + const struct rocker_tlv *start) +{ + desc_info->tlv_size = (const char *) start - desc_info->data; +} + +#endif diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile index dcc80b9d4370..31e968561d5c 100644 --- a/drivers/net/ethernet/samsung/sxgbe/Makefile +++ b/drivers/net/ethernet/samsung/sxgbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \ sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \ - sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y) + sxgbe_ethtool.o $(samsung-sxgbe-y) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c deleted file mode 100644 index 51c32194ba88..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c +++ /dev/null @@ -1,91 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Siva Reddy Kallam <siva.kallam@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/bitops.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "sxgbe_common.h" -#include "sxgbe_xpcs.h" - -static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg) -{ - u32 value; - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - value = readl(priv->ioaddr + XPCS_OFFSET + reg); - - return value; -} - -static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data) -{ - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - writel(data, priv->ioaddr + XPCS_OFFSET + reg); - - return 0; -} - -int sxgbe_xpcs_init(struct net_device *ndev) -{ - u32 value; - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - /* 10G XAUI mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13)); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - return 0; -} - -int sxgbe_xpcs_init_1G(struct net_device *ndev) -{ - int value; - - /* 10GBASE-X PCS (1G) mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13)); - - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6)); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13)); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - /* Auto Negotiation cluase 37 enable */ - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12)); - - return 0; -} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h deleted file mode 100644 index 6b26a50724d3..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Byungho An <bh74.an@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __SXGBE_XPCS_H__ -#define __SXGBE_XPCS_H__ - -/* XPCS Registers */ -#define XPCS_OFFSET 0x1A060000 -#define SR_PCS_MMD_CONTROL1 0x030000 -#define SR_PCS_CONTROL2 0x030007 -#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004 -#define VR_PCS_MMD_DIGITAL_STATUS 0x038010 -#define SR_MII_MMD_CONTROL 0x1F0000 -#define SR_MII_MMD_AN_ADV 0x1F0004 -#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005 -#define VR_MII_MMD_AN_CONTROL 0x1F8001 -#define VR_MII_MMD_AN_INT_STATUS 0x1F8002 - -#define XPCS_QSEQ_STATE_STABLE 0x10 -#define XPCS_QSEQ_STATE_MPLLOFF 0x1c -#define XPCS_TYPE_SEL_R 0x00 -#define XPCS_TYPE_SEL_X 0x01 -#define XPCS_TYPE_SEL_W 0x02 -#define XPCS_XAUI_MODE 0x00 -#define XPCS_RXAUI_MODE 0x01 - -int sxgbe_xpcs_init(struct net_device *ndev); -int sxgbe_xpcs_init_1G(struct net_device *ndev); - -#endif /* __SXGBE_XPCS_H__ */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 10827476bc0b..5e3f93f04e62 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,7 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 034797661f96..445ccdb6bc67 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -783,14 +783,26 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; #define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF #define PORT_FULL_MASK ((__force __be16)~0) #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -833,6 +845,35 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, ip_entry->psrc = spec.rem_port; ip_mask->psrc = PORT_FULL_MASK; } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + 
EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } } else if (!(spec.match_flags & ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | @@ -855,6 +896,47 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, mac_entry->h_proto = spec.ether_type; mac_mask->h_proto = ETHER_TYPE_FULL_MASK; } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } } else { /* The above should handle all filters that we insert */ WARN_ON(1); @@ -946,11 +1028,27 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, } } +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + static int efx_ethtool_set_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct 
ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -1012,6 +1110,92 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return -EINVAL; break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ? + IPPROTO_TCP : IPPROTO_UDP); + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + case ETHER_FLOW: if (!is_zero_ether_addr(mac_mask->h_dest)) { if (ether_addr_equal(mac_mask->h_dest, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index f7a0ec1bca97..233778911557 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -562,14 +562,20 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u8 num_tc) +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + 
struct tc_to_netdev *ntc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; - unsigned tc; + unsigned tc, num_tc; int rc; + if (ntc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + num_tc = ntc->tc; + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index bd64eb982e52..3f5711061432 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -73,6 +73,9 @@ static const char version[] = #include <linux/etherdevice.h> #include <linux/skbuff.h> +#include <linux/dmaengine.h> +#include <linux/dma/pxa-dma.h> + #include <asm/io.h> #include "smc911x.h" @@ -1174,18 +1177,16 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) #ifdef SMC_USE_DMA static void -smc911x_tx_dma_irq(int dma, void *data) +smc911x_tx_dma_irq(void *data) { - struct net_device *dev = (struct net_device *)data; - struct smc911x_local *lp = netdev_priv(dev); + struct smc911x_local *lp = data; + struct net_device *dev = lp->netdev; struct sk_buff *skb = lp->current_tx_skb; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); BUG_ON(skb == NULL); dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); dev->trans_start = jiffies; @@ -1208,18 +1209,16 @@ smc911x_tx_dma_irq(int dma, void *data) "TX DMA irq completed\n"); } static void -smc911x_rx_dma_irq(int dma, void *data) +smc911x_rx_dma_irq(void *data) { - struct net_device *dev = (struct net_device *)data; - struct smc911x_local *lp = netdev_priv(dev); + struct smc911x_local *lp = data; + struct net_device *dev = lp->netdev; struct sk_buff *skb = lp->current_rx_skb; unsigned long flags; unsigned int pkts; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); BUG_ON(skb == NULL); lp->current_rx_skb = NULL; @@ -1792,6 +1791,9 @@ static int smc911x_probe(struct net_device *dev) unsigned int val, chip_id, revision; const char *version_string; unsigned long irq_flags; + struct dma_slave_config config; + dma_cap_mask_t mask; + struct pxad_param param; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); @@ -1963,11 +1965,40 @@ static int smc911x_probe(struct net_device *dev) goto err_out; #ifdef SMC_USE_DMA - lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); - lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + param.prio = PXAD_PRIO_LOWEST; + param.drcmr = -1UL; + + lp->rxdma = + dma_request_slave_channel_compat(mask, pxad_filter_fn, + ¶m, &dev->dev, "rx"); + lp->txdma = + dma_request_slave_channel_compat(mask, pxad_filter_fn, + ¶m, &dev->dev, "tx"); lp->rxdma_active = 0; lp->txdma_active = 0; - dev->dma = lp->rxdma; + + memset(&config, 0, sizeof(config)); + config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.src_addr = lp->physaddr + RX_DATA_FIFO; + config.dst_addr = lp->physaddr + TX_DATA_FIFO; + config.src_maxburst = 32; + config.dst_maxburst = 32; + retval = dmaengine_slave_config(lp->rxdma, &config); + if (retval) { + dev_err(lp->dev, "dma rx channel configuration failed: %d\n", + retval); + goto err_out; 
+ } + retval = dmaengine_slave_config(lp->txdma, &config); + if (retval) { + dev_err(lp->dev, "dma tx channel configuration failed: %d\n", + retval); + goto err_out; + } #endif retval = register_netdev(dev); @@ -1978,11 +2009,11 @@ static int smc911x_probe(struct net_device *dev) dev->base_addr, dev->irq); #ifdef SMC_USE_DMA - if (lp->rxdma != -1) - pr_cont(" RXDMA %d", lp->rxdma); + if (lp->rxdma) + pr_cont(" RXDMA %p", lp->rxdma); - if (lp->txdma != -1) - pr_cont(" TXDMA %d", lp->txdma); + if (lp->txdma) + pr_cont(" TXDMA %p", lp->txdma); #endif pr_cont("\n"); if (!is_valid_ether_addr(dev->dev_addr)) { @@ -2005,12 +2036,10 @@ static int smc911x_probe(struct net_device *dev) err_out: #ifdef SMC_USE_DMA if (retval) { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } + if (lp->rxdma) + dma_release_channel(lp->rxdma); + if (lp->txdma) + dma_release_channel(lp->txdma); } #endif return retval; @@ -2112,12 +2141,10 @@ static int smc911x_drv_remove(struct platform_device *pdev) #ifdef SMC_USE_DMA { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } + if (lp->rxdma) + dma_release_channel(lp->rxdma); + if (lp->txdma) + dma_release_channel(lp->txdma); } #endif iounmap(lp->base); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h index 04b35f55df97..fa528ea0ea51 100644 --- a/drivers/net/ethernet/smsc/smc911x.h +++ b/drivers/net/ethernet/smsc/smc911x.h @@ -101,8 +101,8 @@ struct smc911x_local { #ifdef SMC_USE_DMA /* DMA needs the physical address of the chip */ u_long physaddr; - int rxdma; - int txdma; + struct dma_chan *rxdma; + struct dma_chan *txdma; int rxdma_active; int txdma_active; struct sk_buff *current_rx_skb; @@ -210,27 +210,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, #ifdef SMC_USE_PXA_DMA -#include <mach/dma.h> - -/* - * Define the request and free functions - * These are unfortunately architecture specific as no generic allocation - * mechanism exits - */ -#define SMC_DMA_REQUEST(dev, handler) \ - pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev) - -#define SMC_DMA_FREE(dev, dma) \ - pxa_free_dma(dma) - -#define SMC_DMA_ACK_IRQ(dev, dma) \ -{ \ - if (DCSR(dma) & DCSR_BUSERR) { \ - netdev_err(dev, "DMA %d bus error!\n", dma); \ - } \ - DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \ -} - /* * Use a DMA for RX and TX packets. 
*/ @@ -238,6 +217,8 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, static dma_addr_t rx_dmabuf, tx_dmabuf; static int rx_dmalen, tx_dmalen; +static void smc911x_rx_dma_irq(void *data); +static void smc911x_tx_dma_irq(void *data); #ifdef SMC_insl #undef SMC_insl @@ -246,8 +227,10 @@ static int rx_dmalen, tx_dmalen; static inline void smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, - int reg, int dma, u_char *buf, int len) + int reg, struct dma_chan *dma, u_char *buf, int len) { + struct dma_async_tx_descriptor *tx; + /* 64 bit alignment is required for memory to memory DMA */ if ((long)buf & 4) { *((u32 *)buf) = SMC_inl(lp, reg); @@ -258,12 +241,14 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, len *= 4; rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE); rx_dmalen = len; - DCSR(dma) = DCSR_NODESC; - DTADR(dma) = rx_dmabuf; - DSADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | - DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; + tx = dmaengine_prep_slave_single(dma, rx_dmabuf, rx_dmalen, + DMA_DEV_TO_MEM, 0); + if (tx) { + tx->callback = smc911x_rx_dma_irq; + tx->callback_param = lp; + dmaengine_submit(tx); + dma_async_issue_pending(dma); + } } #endif @@ -274,8 +259,10 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, static inline void smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, - int reg, int dma, u_char *buf, int len) + int reg, struct dma_chan *dma, u_char *buf, int len) { + struct dma_async_tx_descriptor *tx; + /* 64 bit alignment is required for memory to memory DMA */ if ((long)buf & 4) { SMC_outl(*((u32 *)buf), lp, reg); @@ -286,12 +273,14 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, len *= 4; tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE); tx_dmalen = len; - DCSR(dma) = DCSR_NODESC; - DSADR(dma) = tx_dmabuf; - DTADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 | - DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; + tx = dmaengine_prep_slave_single(dma, tx_dmabuf, tx_dmalen, + DMA_DEV_TO_MEM, 0); + if (tx) { + tx->callback = smc911x_tx_dma_irq; + tx->callback_param = lp; + dmaengine_submit(tx); + dma_async_issue_pending(dma); + } } #endif #endif /* SMC_USE_PXA_DMA */ diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0e2fc1a844ab..c5ed27c54724 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev) #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) #else -#define smc_special_trylock(lock, flags) (flags == flags) +#define smc_special_trylock(lock, flags) ((void)flags, true) #define smc_special_lock(lock, flags) do { flags = 0; } while (0) #define smc_special_unlock(lock, flags) do { flags = 0; } while (0) #endif @@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev) } ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { - ret = -ENODEV; + if (ndev->irq < 0) { + ret = ndev->irq; goto out_release_io; } /* diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index cf28daba4346..b3e669af3005 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -31,8 +31,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int txsize = priv->dma_tx_size; - unsigned int entry = priv->cur_tx % txsize; + unsigned int entry = priv->cur_tx; struct dma_desc *desc = priv->dma_tx + entry; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax; @@ -50,11 +49,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); + priv->tx_skbuff_dma[entry].len = bmax; + /* do not close the descriptor and do not set own bit */ + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false); while (len != 0) { priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); desc = priv->dma_tx + entry; if (len > bmax) { @@ -64,9 +66,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = bmax; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, - STMMAC_CHAIN_MODE); - priv->hw->desc->set_tx_owner(desc); + STMMAC_CHAIN_MODE, 1, + false); len -= bmax; i++; } else { @@ -76,12 +79,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_CHAIN_MODE); - priv->hw->desc->set_tx_owner(desc); + STMMAC_CHAIN_MODE, 1, + true); len = 0; } } + + priv->cur_tx = entry; + return entry; } @@ -138,23 +146,24 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) */ p->des3 = (unsigned int)(priv->dma_rx_phy + (((priv->dirty_rx) + 1) % - priv->dma_rx_size) * + DMA_RX_SIZE) * sizeof(struct dma_desc)); } static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + unsigned int entry = priv->dirty_tx; - if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) + if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + priv->hwts_tx_en) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. 
*/ - p->des3 = (unsigned int)(priv->dma_tx_phy + - (((priv->dirty_tx + 1) % - priv->dma_tx_size) * - sizeof(struct dma_desc))); + p->des3 = (unsigned int)((priv->dma_tx_phy + + ((priv->dirty_tx + 1) % DMA_TX_SIZE)) + * sizeof(struct dma_desc)); } const struct stmmac_mode_ops chain_mode_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1e19c8fd8b82..f96d257308b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -27,6 +27,7 @@ #include <linux/etherdevice.h> #include <linux/netdevice.h> +#include <linux/stmmac.h> #include <linux/phy.h> #include <linux/module.h> #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) @@ -41,6 +42,10 @@ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DMA_TX_SIZE 512 +#define DMA_RX_SIZE 512 +#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ @@ -95,7 +100,7 @@ struct stmmac_extra_stats { unsigned long napi_poll; unsigned long tx_normal_irq_n; unsigned long tx_clean; - unsigned long tx_reset_ic_bit; + unsigned long tx_set_ic_bit; unsigned long irq_receive_pmt_irq_n; /* MMC info */ unsigned long mmc_tx_irq_n; @@ -233,10 +238,19 @@ struct stmmac_extra_stats { /* Rx IPC status */ enum rx_frame_status { - good_frame = 0, - discard_frame = 1, - csum_none = 2, - llc_snap = 4, + good_frame = 0x0, + discard_frame = 0x1, + csum_none = 0x2, + llc_snap = 0x4, + dma_own = 0x8, +}; + +/* Tx status */ +enum tx_frame_status { + tx_done = 0x0, + tx_not_ls = 0x1, + tx_err = 0x2, + tx_dma_own = 0x4, }; enum dma_irq_status { @@ -332,17 +346,16 @@ struct stmmac_desc_ops { /* Invoked by the xmit function to prepare the tx descriptor */ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode); + bool csum_flag, int mode, bool tx_own, + bool ls); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); - /* Invoked by the xmit function to close the tx descriptor */ - void (*close_tx_desc) (struct dma_desc *p); /* Clean the tx descriptor as soon as the tx irq is received */ void (*release_tx_desc) (struct dma_desc *p, int mode); /* Clear interrupt on tx frame completion. 
When this bit is * set an interrupt happens as soon as the frame is transmitted */ - void (*clear_tx_ic) (struct dma_desc *p); + void (*set_tx_ic)(struct dma_desc *p); /* Last tx segment reports the transmit status */ int (*get_tx_ls) (struct dma_desc *p); /* Return the transmit status looking at the TDES1 */ @@ -351,7 +364,6 @@ struct stmmac_desc_ops { /* Get the buffer size from the descriptor */ int (*get_tx_len) (struct dma_desc *p); /* Handle extra events on specific interrupts hw dependent */ - int (*get_rx_owner) (struct dma_desc *p); void (*set_rx_owner) (struct dma_desc *p); /* Get the receive frame size */ int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); @@ -376,8 +388,11 @@ extern const struct stmmac_desc_ops ndesc_ops; /* Specific DMA helpers */ struct stmmac_dma_ops { /* DMA core initialization */ - int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds); + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb, + int aal, u32 dma_tx, u32 dma_rx, int atds); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ void (*dump_regs) (void __iomem *ioaddr); /* Set tx/rx threshold in the csr6 register diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 799c2929c536..2e4c171a2b41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -1,6 +1,6 @@ /******************************************************************************* - Header File to describe the DMA descriptors. - Enhanced descriptors have been in case of DWMAC1000 Cores. + Header File to describe the DMA descriptors and related definitions. + This is for DWMAC100 and 1000 cores. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -24,198 +24,164 @@ #ifndef __DESCS_H__ #define __DESCS_H__ +#include <linux/bitops.h> + +/* Normal receive descriptor defines */ + +/* RDES0 */ +#define RDES0_PAYLOAD_CSUM_ERR BIT(0) +#define RDES0_CRC_ERROR BIT(1) +#define RDES0_DRIBBLING BIT(2) +#define RDES0_MII_ERROR BIT(3) +#define RDES0_RECEIVE_WATCHDOG BIT(4) +#define RDES0_FRAME_TYPE BIT(5) +#define RDES0_COLLISION BIT(6) +#define RDES0_IPC_CSUM_ERROR BIT(7) +#define RDES0_LAST_DESCRIPTOR BIT(8) +#define RDES0_FIRST_DESCRIPTOR BIT(9) +#define RDES0_VLAN_TAG BIT(10) +#define RDES0_OVERFLOW_ERROR BIT(11) +#define RDES0_LENGTH_ERROR BIT(12) +#define RDES0_SA_FILTER_FAIL BIT(13) +#define RDES0_DESCRIPTOR_ERROR BIT(14) +#define RDES0_ERROR_SUMMARY BIT(15) +#define RDES0_FRAME_LEN_MASK GENMASK(29, 16) +#define RDES0_FRAME_LEN_SHIFT 16 +#define RDES0_DA_FILTER_FAIL BIT(30) +#define RDES0_OWN BIT(31) + /* RDES1 */ +#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define RDES1_BUFFER2_SIZE_SHIFT 11 +#define RDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define RDES1_END_RING BIT(25) +#define RDES1_DISABLE_IC BIT(31) + +/* Enhanced receive descriptor defines */ + +/* RDES0 (similar to normal RDES) */ +#define ERDES0_RX_MAC_ADDR BIT(0) + +/* RDES1: completely differ from normal desc definitions */ +#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14) +#define ERDES1_END_RING BIT(15) +#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ERDES1_BUFFER2_SIZE_SHIFT 16 +#define ERDES1_DISABLE_IC BIT(31) + +/* Normal transmit descriptor defines */ +/* TDES0 */ +#define TDES0_DEFERRED BIT(0) +#define TDES0_UNDERFLOW_ERROR BIT(1) +#define TDES0_EXCESSIVE_DEFERRAL BIT(2) +#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define TDES0_VLAN_FRAME BIT(7) +#define TDES0_EXCESSIVE_COLLISIONS BIT(8) +#define TDES0_LATE_COLLISION BIT(9) +#define TDES0_NO_CARRIER BIT(10) +#define TDES0_LOSS_CARRIER BIT(11) +#define TDES0_PAYLOAD_ERROR BIT(12) +#define TDES0_FRAME_FLUSHED BIT(13) +#define TDES0_JABBER_TIMEOUT BIT(14) +#define TDES0_ERROR_SUMMARY BIT(15) +#define TDES0_IP_HEADER_ERROR BIT(16) +#define TDES0_TIME_STAMP_STATUS BIT(17) +#define TDES0_OWN BIT(31) +/* TDES1 */ +#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define TDES1_BUFFER2_SIZE_SHIFT 11 +#define TDES1_TIME_STAMP_ENABLE BIT(22) +#define TDES1_DISABLE_PADDING BIT(23) +#define TDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define TDES1_END_RING BIT(25) +#define TDES1_CRC_DISABLE BIT(26) +#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27) +#define TDES1_CHECKSUM_INSERTION_SHIFT 27 +#define TDES1_FIRST_SEGMENT BIT(29) +#define TDES1_LAST_SEGMENT BIT(30) +#define TDES1_INTERRUPT BIT(31) + +/* Enhanced transmit descriptor defines */ +/* TDES0 */ +#define ETDES0_DEFERRED BIT(0) +#define ETDES0_UNDERFLOW_ERROR BIT(1) +#define ETDES0_EXCESSIVE_DEFERRAL BIT(2) +#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define ETDES0_VLAN_FRAME BIT(7) +#define ETDES0_EXCESSIVE_COLLISIONS BIT(8) +#define ETDES0_LATE_COLLISION BIT(9) +#define ETDES0_NO_CARRIER BIT(10) +#define ETDES0_LOSS_CARRIER BIT(11) +#define ETDES0_PAYLOAD_ERROR BIT(12) +#define ETDES0_FRAME_FLUSHED BIT(13) +#define ETDES0_JABBER_TIMEOUT BIT(14) +#define ETDES0_ERROR_SUMMARY BIT(15) +#define ETDES0_IP_HEADER_ERROR BIT(16) +#define ETDES0_TIME_STAMP_STATUS BIT(17) +#define 
ETDES0_SECOND_ADDRESS_CHAINED BIT(20) +#define ETDES0_END_RING BIT(21) +#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22) +#define ETDES0_CHECKSUM_INSERTION_SHIFT 22 +#define ETDES0_TIME_STAMP_ENABLE BIT(25) +#define ETDES0_DISABLE_PADDING BIT(26) +#define ETDES0_CRC_DISABLE BIT(27) +#define ETDES0_FIRST_SEGMENT BIT(28) +#define ETDES0_LAST_SEGMENT BIT(29) +#define ETDES0_INTERRUPT BIT(30) +#define ETDES0_OWN BIT(31) +/* TDES1 */ +#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ETDES1_BUFFER2_SIZE_SHIFT 16 + +/* Extended Receive descriptor definitions */ +#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(2, 6) +#define ERDES4_IP_HDR_ERR BIT(3) +#define ERDES4_IP_PAYLOAD_ERR BIT(4) +#define ERDES4_IP_CSUM_BYPASSED BIT(5) +#define ERDES4_IPV4_PKT_RCVD BIT(6) +#define ERDES4_IPV6_PKT_RCVD BIT(7) +#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8) +#define ERDES4_PTP_FRAME_TYPE BIT(12) +#define ERDES4_PTP_VER BIT(13) +#define ERDES4_TIMESTAMP_DROPPED BIT(14) +#define ERDES4_AV_PKT_RCVD BIT(16) +#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17) +#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18) +#define ERDES4_L3_FILTER_MATCH BIT(24) +#define ERDES4_L4_FILTER_MATCH BIT(25) +#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) + +/* Extended RDES4 message type definitions */ +#define RDES_EXT_NO_PTP 0 +#define RDES_EXT_SYNC 1 +#define RDES_EXT_FOLLOW_UP 2 +#define RDES_EXT_DELAY_REQ 3 +#define RDES_EXT_DELAY_RESP 4 +#define RDES_EXT_PDELAY_REQ 5 +#define RDES_EXT_PDELAY_RESP 6 +#define RDES_EXT_PDELAY_FOLLOW_UP 7 + /* Basic descriptor structure for normal and alternate descriptors */ struct dma_desc { - /* Receive descriptor */ - union { - struct { - /* RDES0 */ - u32 payload_csum_error:1; - u32 crc_error:1; - u32 dribbling:1; - u32 mii_error:1; - u32 receive_watchdog:1; - u32 frame_type:1; - u32 collision:1; - u32 ipc_csum_error:1; - u32 last_descriptor:1; - u32 first_descriptor:1; - u32 vlan_tag:1; - u32 overflow_error:1; - u32 length_error:1; - u32 sa_filter_fail:1; - u32 descriptor_error:1; - u32 error_summary:1; - u32 frame_length:14; - u32 da_filter_fail:1; - u32 own:1; - /* RDES1 */ - u32 buffer1_size:11; - u32 buffer2_size:11; - u32 reserved1:2; - u32 second_address_chained:1; - u32 end_ring:1; - u32 reserved2:5; - u32 disable_ic:1; - - } rx; - struct { - /* RDES0 */ - u32 rx_mac_addr:1; - u32 crc_error:1; - u32 dribbling:1; - u32 error_gmii:1; - u32 receive_watchdog:1; - u32 frame_type:1; - u32 late_collision:1; - u32 ipc_csum_error:1; - u32 last_descriptor:1; - u32 first_descriptor:1; - u32 vlan_tag:1; - u32 overflow_error:1; - u32 length_error:1; - u32 sa_filter_fail:1; - u32 descriptor_error:1; - u32 error_summary:1; - u32 frame_length:14; - u32 da_filter_fail:1; - u32 own:1; - /* RDES1 */ - u32 buffer1_size:13; - u32 reserved1:1; - u32 second_address_chained:1; - u32 end_ring:1; - u32 buffer2_size:13; - u32 reserved2:2; - u32 disable_ic:1; - } erx; /* -- enhanced -- */ - - /* Transmit descriptor */ - struct { - /* TDES0 */ - u32 deferred:1; - u32 underflow_error:1; - u32 excessive_deferral:1; - u32 collision_count:4; - u32 vlan_frame:1; - u32 excessive_collisions:1; - u32 late_collision:1; - u32 no_carrier:1; - u32 loss_carrier:1; - u32 payload_error:1; - u32 frame_flushed:1; - u32 jabber_timeout:1; - u32 error_summary:1; - u32 ip_header_error:1; - u32 time_stamp_status:1; - u32 reserved1:13; - u32 own:1; - /* TDES1 */ - u32 buffer1_size:11; - u32 buffer2_size:11; - u32 time_stamp_enable:1; - u32 disable_padding:1; - u32 
second_address_chained:1; - u32 end_ring:1; - u32 crc_disable:1; - u32 checksum_insertion:2; - u32 first_segment:1; - u32 last_segment:1; - u32 interrupt:1; - } tx; - struct { - /* TDES0 */ - u32 deferred:1; - u32 underflow_error:1; - u32 excessive_deferral:1; - u32 collision_count:4; - u32 vlan_frame:1; - u32 excessive_collisions:1; - u32 late_collision:1; - u32 no_carrier:1; - u32 loss_carrier:1; - u32 payload_error:1; - u32 frame_flushed:1; - u32 jabber_timeout:1; - u32 error_summary:1; - u32 ip_header_error:1; - u32 time_stamp_status:1; - u32 reserved1:2; - u32 second_address_chained:1; - u32 end_ring:1; - u32 checksum_insertion:2; - u32 reserved2:1; - u32 time_stamp_enable:1; - u32 disable_padding:1; - u32 crc_disable:1; - u32 first_segment:1; - u32 last_segment:1; - u32 interrupt:1; - u32 own:1; - /* TDES1 */ - u32 buffer1_size:13; - u32 reserved3:3; - u32 buffer2_size:13; - u32 reserved4:3; - } etx; /* -- enhanced -- */ - - u64 all_flags; - } des01; + unsigned int des0; + unsigned int des1; unsigned int des2; unsigned int des3; }; -/* Extended descriptor structure (supported by new SYNP GMAC generations) */ +/* Extended descriptor structure (e.g. >= databook 3.50a) */ struct dma_extended_desc { - struct dma_desc basic; - union { - struct { - u32 ip_payload_type:3; - u32 ip_hdr_err:1; - u32 ip_payload_err:1; - u32 ip_csum_bypassed:1; - u32 ipv4_pkt_rcvd:1; - u32 ipv6_pkt_rcvd:1; - u32 msg_type:4; - u32 ptp_frame_type:1; - u32 ptp_ver:1; - u32 timestamp_dropped:1; - u32 reserved:1; - u32 av_pkt_rcvd:1; - u32 av_tagged_pkt_rcvd:1; - u32 vlan_tag_priority_val:3; - u32 reserved3:3; - u32 l3_filter_match:1; - u32 l4_filter_match:1; - u32 l3_l4_filter_no_match:2; - u32 reserved4:4; - } erx; - struct { - u32 reserved; - } etx; - } des4; + struct dma_desc basic; /* Basic descriptors */ + unsigned int des4; /* Extended Status */ unsigned int des5; /* Reserved */ unsigned int des6; /* Tx/Rx Timestamp Low */ unsigned int des7; /* Tx/Rx Timestamp High */ }; /* Transmit checksum insertion control */ -enum tdes_csum_insertion { - cic_disabled = 0, /* Checksum Insertion Control */ - cic_only_ip = 1, /* Only IP header */ - /* IP header but pseudoheader is not calculated */ - cic_no_pseudoheader = 2, - cic_full = 3, /* IP header and pseudoheader */ -}; - -/* Extended RDES4 definitions */ -#define RDES_EXT_NO_PTP 0 -#define RDES_EXT_SYNC 0x1 -#define RDES_EXT_FOLLOW_UP 0x2 -#define RDES_EXT_DELAY_REQ 0x3 -#define RDES_EXT_DELAY_RESP 0x4 -#define RDES_EXT_PDELAY_REQ 0x5 -#define RDES_EXT_PDELAY_RESP 0x6 -#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 +#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ #endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 6f2cc78c5cf5..7635a464ce41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -35,100 +35,91 @@ /* Enhanced descriptors */ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; - if (end) - p->des01.erx.end_ring = 1; -} + p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK; -static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end) -{ if (end) - p->des01.etx.end_ring = 1; + p->des1 |= ERDES1_END_RING; } -static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end) { - 
p->des01.etx.end_ring = ter; + if (end) + p->des0 |= ETDES0_END_RING; + else + p->des0 &= ~ETDES0_END_RING; } static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_4KiB)) { - p->des01.etx.buffer1_size = BUF_SIZE_4KiB; - p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT) + & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB + & ETDES1_BUFFER1_SIZE_MASK); } else - p->des01.etx.buffer1_size = len; + p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); } /* Normal descriptors */ static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; - if (end) - p->des01.rx.end_ring = 1; -} + p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK; -static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end) -{ if (end) - p->des01.tx.end_ring = 1; + p->des1 |= RDES1_END_RING; } -static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end) { - p->des01.tx.end_ring = ter; + if (end) + p->des1 |= TDES1_END_RING; + else + p->des1 &= ~TDES1_END_RING; } static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_2KiB)) { - p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; - p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size; + unsigned int buffer1 = (BUF_SIZE_2KiB - 1) + & TDES1_BUFFER1_SIZE_MASK; + p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT) + & TDES1_BUFFER2_SIZE_MASK) | buffer1); } else - p->des01.tx.buffer1_size = len; + p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK); } /* Specific functions used for Chain mode */ /* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end) -{ - p->des01.erx.second_address_chained = 1; -} - -static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p) { - p->des01.etx.second_address_chained = 1; + p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED; } -static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p) { - p->des01.etx.second_address_chained = 1; + p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED; } static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { - p->des01.etx.buffer1_size = len; + p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); } /* Normal descriptors */ static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) { - p->des01.rx.second_address_chained = 1; -} - -static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size) -{ - p->des01.tx.second_address_chained = 1; + p->des1 |= RDES1_SECOND_ADDRESS_CHAINED; } -static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +static inline void ndesc_tx_set_on_chain(struct dma_desc *p) { - p->des01.tx.second_address_chained = 1; + p->des1 |= TDES1_SECOND_ADDRESS_CHAINED; } static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { - p->des01.tx.buffer1_size = len; + p->des1 |= len & TDES1_BUFFER1_SIZE_MASK; } #endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 2ec6aeae349e..1657acfa70c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -95,7 
+95,6 @@ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ #define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ -#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ #define DMA_BUS_MODE_DEFAULT 0x00000000 /* DMA Control register defines */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 8831a053ac13..b0593a4268ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -221,7 +221,6 @@ enum inter_frame_gap { /*--- DMA BLOCK defines ---*/ /* DMA Bus Mode register defines */ -#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ #define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ @@ -241,7 +240,7 @@ enum rx_tx_priority_ratio { #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ #define DMA_BUS_MODE_RPBL_SHIFT 17 #define DMA_BUS_MODE_USP 0x00800000 -#define DMA_BUS_MODE_PBL 0x01000000 +#define DMA_BUS_MODE_MAXPBL 0x01000000 #define DMA_BUS_MODE_AAL 0x02000000 /* DMA CRS Control and Status Register Mapping */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 0e8937c1184a..da32d6037e3e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -30,37 +30,76 @@ #include "dwmac1000.h" #include "dwmac_dma.h" -static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) +static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) { - u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; + u32 value = readl(ioaddr + DMA_AXI_BUS_MODE); + int i; - /* DMA SW reset */ - value |= DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + pr_info("dwmac1000: Master AXI performs %s burst length\n", + !(value & DMA_AXI_UNDEF) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). + */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; break; - mdelay(10); + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } } - if (limit < 0) - return -EBUSY; + + writel(value, ioaddr + DMA_AXI_BUS_MODE); +} + +static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int aal, u32 dma_tx, u32 dma_rx, int atds) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); /* - * Set the DMA PBL (Programmable Burst Length) mode - * Before stmmac core 3.50 this mode bit was 4xPBL, and + * Set the DMA PBL (Programmable Burst Length) mode. 
+ * + * Note: before stmmac core 3.50 this mode bit was 4xPBL, and * post 3.5 mode bit acts as 8*PBL. - * For core rev < 3.5, when the core is set for 4xPBL mode, the - * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats - * depending on pbl value. - * For core rev > 3.5, when the core is set for 8xPBL mode, the - * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats - * depending on pbl value. + * + * This configuration doesn't take care about the Separate PBL + * so only the bits: 13-8 are programmed with the PBL passed from the + * platform. */ - value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | - (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + value |= DMA_BUS_MODE_MAXPBL; + value &= ~DMA_BUS_MODE_PBL_MASK; + value |= (pbl << DMA_BUS_MODE_PBL_SHIFT); /* Set the Fixed burst mode */ if (fb) @@ -73,26 +112,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, if (atds) value |= DMA_BUS_MODE_ATDS; - writel(value, ioaddr + DMA_BUS_MODE); + if (aal) + value |= DMA_BUS_MODE_AAL; - /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE - * for supported bursts. - * - * Note: This is applicable only for revision GMACv3.61a. For - * older version this register is reserved and shall have no - * effect. - * - * Note: - * For Fixed Burst Mode: if we directly write 0xFF to this - * register using the configurations pass from platform code, - * this would ensure that all bursts supported by core are set - * and those which are not supported would remain ineffective. - * - * For Non Fixed Burst Mode: provide the maximum value of the - * burst length. Any burst equal or below the provided burst - * length would be allowed to perform. - */ - writel(burst_len, ioaddr + DMA_AXI_BUS_MODE); + writel(value, ioaddr + DMA_BUS_MODE); /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); @@ -102,8 +125,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); - - return 0; } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -205,7 +226,9 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) } const struct stmmac_dma_ops dwmac1000_dma_ops = { + .reset = dwmac_dma_reset, .init = dwmac1000_dma_init, + .axi = dwmac1000_dma_axi, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, .enable_dma_transmission = dwmac_enable_dma_transmission, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 9d0971c1c2ee..61f54c99a7de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -32,24 +32,9 @@ #include "dwmac100.h" #include "dwmac_dma.h" -static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) +static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int aal, u32 dma_tx, u32 dma_rx, int atds) { - u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; - - /* DMA SW reset */ - value |= DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - break; - mdelay(10); - } - if (limit < 0) - return -EBUSY; - /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), ioaddr + 
DMA_BUS_MODE); @@ -62,8 +47,6 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); - - return 0; } /* Store and Forward capability is not used at all. @@ -131,6 +114,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, } const struct stmmac_dma_ops dwmac100_dma_ops = { + .reset = dwmac_dma_reset, .init = dwmac100_dma_init, .dump_regs = dwmac100_dump_dma_regs, .dma_mode = dwmac100_dma_operation_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index def266da55db..726d9d9aaf83 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -35,10 +35,46 @@ #define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ + +/* SW Reset */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ + /* Rx watchdog register */ #define DMA_RX_WATCHDOG 0x00001024 -/* AXI Bus Mode */ + +/* AXI Master Bus Mode */ #define DMA_AXI_BUS_MODE 0x00001028 + +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20) +#define DMA_AXI_WR_OSR_LMT_SHIFT 20 +#define DMA_AXI_WR_OSR_LMT_MASK 0xf +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 +#define DMA_AXI_RD_OSR_LMT_MASK 0xf + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_AXI_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_UNDEF BIT(0) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ #define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ @@ -112,5 +148,6 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr); void dwmac_dma_start_rx(void __iomem *ioaddr); void dwmac_dma_stop_rx(void __iomem *ioaddr); int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); +int dwmac_dma_reset(void __iomem *ioaddr); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 484e3cf9c414..84e3e84cec7d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -26,6 +26,27 @@ #define GMAC_HI_REG_AE 0x80000000 +int dwmac_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + + if (limit < 0) + return -EBUSY; + + return 0; +} + /* CSR1 enables the transmit DMA to check for new descriptor */ void dwmac_enable_dma_transmission(void __iomem *ioaddr) { 
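[Editor's note: the hunks above split the old monolithic DMA init into separate .reset/.init/.axi callbacks in struct stmmac_dma_ops, with the software reset now shared via dwmac_dma_reset(). The sketch below is a minimal, hypothetical illustration of how a caller in the stmmac core could chain these callbacks; it is not part of the patch. The helper name, and fields such as priv->hw->dma and priv->ioaddr, are assumptions made for illustration (the real call site is not shown in these hunks), and the code assumes the stmmac driver headers.]

/* Illustrative only: call order for the new split DMA callbacks. */
static int example_init_dma_engine(struct stmmac_priv *priv,
				   struct stmmac_axi *axi_cfg,
				   int pbl, int fb, int mb, int aal, int atds)
{
	int ret;

	/* 1) Common software reset, now shared by dwmac100 and dwmac1000 */
	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the DMA\n");
		return ret;
	}

	/* 2) Program the bus mode and the descriptor base addresses */
	priv->hw->dma->init(priv->ioaddr, pbl, fb, mb, aal,
			    priv->dma_tx_phy, priv->dma_rx_phy, atds);

	/* 3) Optional AXI bus tuning; only dwmac1000 provides this callback */
	if (axi_cfg && priv->hw->dma->axi)
		priv->hw->dma->axi(priv->ioaddr, axi_cfg);

	return 0;
}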
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7d944449f5ef..cfb018c7c5eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -1,7 +1,7 @@ /******************************************************************************* This contains the functions to handle the enhanced descriptors. - Copyright (C) 2007-2009 STMicroelectronics Ltd + Copyright (C) 2007-2014 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -29,56 +29,64 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { - int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = p->des0; + int ret = tx_done; - if (unlikely(p->des01.etx.error_summary)) { - if (unlikely(p->des01.etx.jabber_timeout)) + /* Get tx owner first */ + if (unlikely(tdes0 & ETDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes0 & ETDES0_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT)) x->tx_jabber++; - if (unlikely(p->des01.etx.frame_flushed)) { + if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) { x->tx_frame_flushed++; dwmac_dma_flush_tx_fifo(ioaddr); } - if (unlikely(p->des01.etx.loss_carrier)) { + if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) { x->tx_losscarrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.no_carrier)) { + if (unlikely(tdes0 & ETDES0_NO_CARRIER)) { x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.late_collision)) - stats->collisions += p->des01.etx.collision_count; - - if (unlikely(p->des01.etx.excessive_collisions)) - stats->collisions += p->des01.etx.collision_count; + if (unlikely((tdes0 & ETDES0_LATE_COLLISION) || + (tdes0 & ETDES0_EXCESSIVE_COLLISIONS))) + stats->collisions += + (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3; - if (unlikely(p->des01.etx.excessive_deferral)) + if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL)) x->tx_deferred++; - if (unlikely(p->des01.etx.underflow_error)) { + if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) { dwmac_dma_flush_tx_fifo(ioaddr); x->tx_underflow++; } - if (unlikely(p->des01.etx.ip_header_error)) + if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR)) x->tx_ip_header_error++; - if (unlikely(p->des01.etx.payload_error)) { + if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) { x->tx_payload_error++; dwmac_dma_flush_tx_fifo(ioaddr); } - ret = -1; + ret = tx_err; } - if (unlikely(p->des01.etx.deferred)) + if (unlikely(tdes0 & ETDES0_DEFERRED)) x->tx_deferred++; #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.etx.vlan_frame) + if (tdes0 & ETDES0_VLAN_FRAME) x->tx_vlan++; #endif @@ -87,7 +95,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, static int enh_desc_get_tx_len(struct dma_desc *p) { - return p->des01.etx.buffer1_size; + return (p->des1 & ETDES1_BUFFER1_SIZE_MASK); } static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) @@ -126,50 +134,55 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, struct dma_extended_desc *p) { - if (unlikely(p->basic.des01.erx.rx_mac_addr)) { - if (p->des4.erx.ip_hdr_err) + unsigned int rdes0 = p->basic.des0; + unsigned int 
rdes4 = p->des4; + + if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { + int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes4 & ERDES4_IP_HDR_ERR) x->ip_hdr_err++; - if (p->des4.erx.ip_payload_err) + if (rdes4 & ERDES4_IP_PAYLOAD_ERR) x->ip_payload_err++; - if (p->des4.erx.ip_csum_bypassed) + if (rdes4 & ERDES4_IP_CSUM_BYPASSED) x->ip_csum_bypassed++; - if (p->des4.erx.ipv4_pkt_rcvd) + if (rdes4 & ERDES4_IPV4_PKT_RCVD) x->ipv4_pkt_rcvd++; - if (p->des4.erx.ipv6_pkt_rcvd) + if (rdes4 & ERDES4_IPV6_PKT_RCVD) x->ipv6_pkt_rcvd++; - if (p->des4.erx.msg_type == RDES_EXT_SYNC) + if (message_type == RDES_EXT_SYNC) x->rx_msg_type_sync++; - else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP) + else if (message_type == RDES_EXT_FOLLOW_UP) x->rx_msg_type_follow_up++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) + else if (message_type == RDES_EXT_DELAY_REQ) x->rx_msg_type_delay_req++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) + else if (message_type == RDES_EXT_DELAY_RESP) x->rx_msg_type_delay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ) + else if (message_type == RDES_EXT_PDELAY_REQ) x->rx_msg_type_pdelay_req++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) + else if (message_type == RDES_EXT_PDELAY_RESP) x->rx_msg_type_pdelay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP) + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) x->rx_msg_type_pdelay_follow_up++; else x->rx_msg_type_ext_no_ptp++; - if (p->des4.erx.ptp_frame_type) + if (rdes4 & ERDES4_PTP_FRAME_TYPE) x->ptp_frame_type++; - if (p->des4.erx.ptp_ver) + if (rdes4 & ERDES4_PTP_VER) x->ptp_ver++; - if (p->des4.erx.timestamp_dropped) + if (rdes4 & ERDES4_TIMESTAMP_DROPPED) x->timestamp_dropped++; - if (p->des4.erx.av_pkt_rcvd) + if (rdes4 & ERDES4_AV_PKT_RCVD) x->av_pkt_rcvd++; - if (p->des4.erx.av_tagged_pkt_rcvd) + if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD) x->av_tagged_pkt_rcvd++; - if (p->des4.erx.vlan_tag_priority_val) + if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18) x->vlan_tag_priority_val++; - if (p->des4.erx.l3_filter_match) + if (rdes4 & ERDES4_L3_FILTER_MATCH) x->l3_filter_match++; - if (p->des4.erx.l4_filter_match) + if (rdes4 & ERDES4_L4_FILTER_MATCH) x->l4_filter_match++; - if (p->des4.erx.l3_l4_filter_no_match) + if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26) x->l3_l4_filter_no_match++; } } @@ -177,30 +190,33 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { - int ret = good_frame; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes0 = p->des0; + int ret = good_frame; + + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; - if (unlikely(p->des01.erx.error_summary)) { - if (unlikely(p->des01.erx.descriptor_error)) { + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; stats->rx_length_errors++; } - if (unlikely(p->des01.erx.overflow_error)) + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) x->rx_gmac_overflow++; - if (unlikely(p->des01.erx.ipc_csum_error)) + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) pr_err("\tIPC Csum Error/Giant frame\n"); - if (unlikely(p->des01.erx.late_collision)) { + if (unlikely(rdes0 & RDES0_COLLISION)) stats->collisions++; - } - if (unlikely(p->des01.erx.receive_watchdog)) + if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG)) x->rx_watchdog++; - if (unlikely(p->des01.erx.error_gmii)) + if (unlikely(rdes0 
& RDES0_MII_ERROR)) /* GMII */ x->rx_mii++; - if (unlikely(p->des01.erx.crc_error)) { + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc++; stats->rx_crc_errors++; } @@ -211,26 +227,27 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. */ - ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, - p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); - if (unlikely(p->des01.erx.dribbling)) + if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; - if (unlikely(p->des01.erx.sa_filter_fail)) { + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) { x->sa_rx_filter_fail++; ret = discard_frame; } - if (unlikely(p->des01.erx.da_filter_fail)) { + if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) { x->da_rx_filter_fail++; ret = discard_frame; } - if (unlikely(p->des01.erx.length_error)) { + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { x->rx_length++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.erx.vlan_tag) + if (rdes0 & RDES0_VLAN_TAG) x->rx_vlan++; #endif @@ -240,110 +257,125 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des01.all_flags = 0; - p->des01.erx.own = 1; - p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + p->des0 |= RDES0_OWN; + p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) - ehn_desc_rx_set_on_chain(p, end); + ehn_desc_rx_set_on_chain(p); else ehn_desc_rx_set_on_ring(p, end); if (disable_rx_ic) - p->des01.erx.disable_ic = 1; + p->des1 |= ERDES1_DISABLE_IC; } static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.all_flags = 0; + p->des0 &= ~ETDES0_OWN; if (mode == STMMAC_CHAIN_MODE) - ehn_desc_tx_set_on_chain(p, end); + enh_desc_end_tx_desc_on_chain(p); else - ehn_desc_tx_set_on_ring(p, end); + enh_desc_end_tx_desc_on_ring(p, end); } static int enh_desc_get_tx_owner(struct dma_desc *p) { - return p->des01.etx.own; -} - -static int enh_desc_get_rx_owner(struct dma_desc *p) -{ - return p->des01.erx.own; + return (p->des0 & ETDES0_OWN) >> 31; } static void enh_desc_set_tx_owner(struct dma_desc *p) { - p->des01.etx.own = 1; + p->des0 |= ETDES0_OWN; } static void enh_desc_set_rx_owner(struct dma_desc *p) { - p->des01.erx.own = 1; + p->des0 |= RDES0_OWN; } static int enh_desc_get_tx_ls(struct dma_desc *p) { - return p->des01.etx.last_segment; + return (p->des0 & ETDES0_LAST_SEGMENT) >> 29; } static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) { - int ter = p->des01.etx.end_ring; + int ter = (p->des0 & ETDES0_END_RING) >> 21; memset(p, 0, offsetof(struct dma_desc, des2)); if (mode == STMMAC_CHAIN_MODE) - enh_desc_end_tx_desc_on_chain(p, ter); + enh_desc_end_tx_desc_on_chain(p); else enh_desc_end_tx_desc_on_ring(p, ter); } static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode) + bool csum_flag, int mode, bool tx_own, + bool ls) { - p->des01.etx.first_segment = is_fs; + unsigned int tdes0 = p->des0; if (mode == STMMAC_CHAIN_MODE) enh_set_tx_desc_len_on_chain(p, len); else enh_set_tx_desc_len_on_ring(p, len); + if (is_fs) + tdes0 |= ETDES0_FIRST_SEGMENT; + else + tdes0 &= 
~ETDES0_FIRST_SEGMENT; + if (likely(csum_flag)) - p->des01.etx.checksum_insertion = cic_full; -} + tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + else + tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); -static void enh_desc_clear_tx_ic(struct dma_desc *p) -{ - p->des01.etx.interrupt = 0; + if (ls) + tdes0 |= ETDES0_LAST_SEGMENT; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes0 |= ETDES0_OWN; + + if (is_fs & tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + wmb(); + + p->des0 = tdes0; } -static void enh_desc_close_tx_desc(struct dma_desc *p) +static void enh_desc_set_tx_ic(struct dma_desc *p) { - p->des01.etx.last_segment = 1; - p->des01.etx.interrupt = 1; + p->des0 |= ETDES0_INTERRUPT; } static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { + unsigned int csum = 0; /* The type-1 checksum offload engines append the checksum at * the end of frame and the two bytes of checksum are added in * the length. * Adjust for that in the framelen for type-1 checksum offload - * engines. */ + * engines. + */ if (rx_coe_type == STMMAC_RX_COE_TYPE1) - return p->des01.erx.frame_length - 2; - else - return p->des01.erx.frame_length; + csum = 2; + + return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - + csum); } static void enh_desc_enable_tx_timestamp(struct dma_desc *p) { - p->des01.etx.time_stamp_enable = 1; + p->des0 |= ETDES0_TIME_STAMP_ENABLE; } static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) { - return p->des01.etx.time_stamp_status; + return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17; } static u64 enh_desc_get_timestamp(void *desc, u32 ats) @@ -368,7 +400,7 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) { if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; - return p->basic.des01.erx.ipc_csum_error; + return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7; } else { struct dma_desc *p = (struct dma_desc *)desc; if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) @@ -386,11 +418,9 @@ const struct stmmac_desc_ops enh_desc_ops = { .init_rx_desc = enh_desc_init_rx_desc, .init_tx_desc = enh_desc_init_tx_desc, .get_tx_owner = enh_desc_get_tx_owner, - .get_rx_owner = enh_desc_get_rx_owner, .release_tx_desc = enh_desc_release_tx_desc, .prepare_tx_desc = enh_desc_prepare_tx_desc, - .clear_tx_ic = enh_desc_clear_tx_ic, - .close_tx_desc = enh_desc_close_tx_desc, + .set_tx_ic = enh_desc_set_tx_ic, .get_tx_ls = enh_desc_get_tx_ls, .set_tx_owner = enh_desc_set_tx_owner, .set_rx_owner = enh_desc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 48c3456445b2..e13228f115f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -29,33 +29,47 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { - int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = p->des0; + unsigned int tdes1 = p->des1; + int ret = tx_done; - if (unlikely(p->des01.tx.error_summary)) { - if (unlikely(p->des01.tx.underflow_error)) { + /* Get tx owner first */ + if (unlikely(tdes0 & TDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. 
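Both descriptor variants now report TX completion through a small set of status codes rather than the old 0/-1 convention. Their definitions are not part of the hunks shown here (they live in the driver's common.h); what this function and the reworked stmmac_tx_clean() later in this diff rely on is only that tx_done is zero and the remaining codes are distinct, individually testable bits, for example:

/* Illustrative values only -- the real definitions sit in common.h,
 * outside the hunks in this diff.
 */
#define tx_done		0x0	/* last segment seen, no error reported */
#define tx_not_ls	0x1	/* not the last segment, nothing to account yet */
#define tx_err		0x2	/* error summary set in the last segment */
#define tx_dma_own	0x4	/* descriptor still owned by the DMA engine */
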
*/ + if (likely(!(tdes1 & TDES1_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { x->tx_underflow++; stats->tx_fifo_errors++; } - if (unlikely(p->des01.tx.no_carrier)) { + if (unlikely(tdes0 & TDES0_NO_CARRIER)) { x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.tx.loss_carrier)) { + if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) { x->tx_losscarrier++; stats->tx_carrier_errors++; } - if (unlikely((p->des01.tx.excessive_deferral) || - (p->des01.tx.excessive_collisions) || - (p->des01.tx.late_collision))) - stats->collisions += p->des01.tx.collision_count; - ret = -1; + if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) || + (tdes0 & TDES0_EXCESSIVE_COLLISIONS) || + (tdes0 & TDES0_LATE_COLLISION))) { + unsigned int collisions; + + collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; + stats->collisions += collisions; + } + ret = tx_err; } - if (p->des01.etx.vlan_frame) + if (tdes0 & TDES0_VLAN_FRAME) x->tx_vlan++; - if (unlikely(p->des01.tx.deferred)) + if (unlikely(tdes0 & TDES0_DEFERRED)) x->tx_deferred++; return ret; @@ -63,7 +77,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, static int ndesc_get_tx_len(struct dma_desc *p) { - return p->des01.tx.buffer1_size; + return (p->des1 & RDES1_BUFFER1_SIZE_MASK); } /* This function verifies if each incoming frame has some errors @@ -74,47 +88,51 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { int ret = good_frame; + unsigned int rdes0 = p->des0; struct net_device_stats *stats = (struct net_device_stats *)data; - if (unlikely(p->des01.rx.last_descriptor == 0)) { + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { pr_warn("%s: Oversized frame spanned multiple buffers\n", __func__); stats->rx_length_errors++; return discard_frame; } - if (unlikely(p->des01.rx.error_summary)) { - if (unlikely(p->des01.rx.descriptor_error)) + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) x->rx_desc++; - if (unlikely(p->des01.rx.sa_filter_fail)) + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) x->sa_filter_fail++; - if (unlikely(p->des01.rx.overflow_error)) + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) x->overflow_error++; - if (unlikely(p->des01.rx.ipc_csum_error)) + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) x->ipc_csum_error++; - if (unlikely(p->des01.rx.collision)) { + if (unlikely(rdes0 & RDES0_COLLISION)) { x->rx_collision++; stats->collisions++; } - if (unlikely(p->des01.rx.crc_error)) { + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc++; stats->rx_crc_errors++; } ret = discard_frame; } - if (unlikely(p->des01.rx.dribbling)) + if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; - if (unlikely(p->des01.rx.length_error)) { + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { x->rx_length++; ret = discard_frame; } - if (unlikely(p->des01.rx.mii_error)) { + if (unlikely(rdes0 & RDES0_MII_ERROR)) { x->rx_mii++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.rx.vlan_tag) + if (rdes0 & RDES0_VLAN_TAG) x->vlan_tag++; #endif return ret; @@ -123,9 +141,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des01.all_flags = 0; - p->des01.rx.own = 1; - p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + p->des0 |= RDES0_OWN; + p->des1 |= (BUF_SIZE_2KiB - 1) & 
RDES1_BUFFER1_SIZE_MASK; if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); @@ -133,99 +150,110 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, ndesc_rx_set_on_ring(p, end); if (disable_rx_ic) - p->des01.rx.disable_ic = 1; + p->des1 |= RDES1_DISABLE_IC; } static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.all_flags = 0; + p->des0 &= ~TDES0_OWN; if (mode == STMMAC_CHAIN_MODE) - ndesc_tx_set_on_chain(p, end); + ndesc_tx_set_on_chain(p); else - ndesc_tx_set_on_ring(p, end); + ndesc_end_tx_desc_on_ring(p, end); } static int ndesc_get_tx_owner(struct dma_desc *p) { - return p->des01.tx.own; -} - -static int ndesc_get_rx_owner(struct dma_desc *p) -{ - return p->des01.rx.own; + return (p->des0 & TDES0_OWN) >> 31; } static void ndesc_set_tx_owner(struct dma_desc *p) { - p->des01.tx.own = 1; + p->des0 |= TDES0_OWN; } static void ndesc_set_rx_owner(struct dma_desc *p) { - p->des01.rx.own = 1; + p->des0 |= RDES0_OWN; } static int ndesc_get_tx_ls(struct dma_desc *p) { - return p->des01.tx.last_segment; + return (p->des1 & TDES1_LAST_SEGMENT) >> 30; } static void ndesc_release_tx_desc(struct dma_desc *p, int mode) { - int ter = p->des01.tx.end_ring; + int ter = (p->des1 & TDES1_END_RING) >> 25; memset(p, 0, offsetof(struct dma_desc, des2)); if (mode == STMMAC_CHAIN_MODE) - ndesc_end_tx_desc_on_chain(p, ter); + ndesc_tx_set_on_chain(p); else ndesc_end_tx_desc_on_ring(p, ter); } static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode) + bool csum_flag, int mode, bool tx_own, + bool ls) { - p->des01.tx.first_segment = is_fs; + unsigned int tdes1 = p->des1; + if (mode == STMMAC_CHAIN_MODE) norm_set_tx_desc_len_on_chain(p, len); else norm_set_tx_desc_len_on_ring(p, len); + if (is_fs) + tdes1 |= TDES1_FIRST_SEGMENT; + else + tdes1 &= ~TDES1_FIRST_SEGMENT; + if (likely(csum_flag)) - p->des01.tx.checksum_insertion = cic_full; -} + tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT; + else + tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); -static void ndesc_clear_tx_ic(struct dma_desc *p) -{ - p->des01.tx.interrupt = 0; + if (ls) + tdes1 |= TDES1_LAST_SEGMENT; + + if (tx_own) + tdes1 |= TDES0_OWN; + + p->des1 = tdes1; } -static void ndesc_close_tx_desc(struct dma_desc *p) +static void ndesc_set_tx_ic(struct dma_desc *p) { - p->des01.tx.last_segment = 1; - p->des01.tx.interrupt = 1; + p->des1 |= TDES1_INTERRUPT; } static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { + unsigned int csum = 0; + /* The type-1 checksum offload engines append the checksum at * the end of frame and the two bytes of checksum are added in * the length. * Adjust for that in the framelen for type-1 checksum offload - * engines. 
*/ + * engines + */ if (rx_coe_type == STMMAC_RX_COE_TYPE1) - return p->des01.rx.frame_length - 2; - else - return p->des01.rx.frame_length; + csum = 2; + + return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - + csum); + } static void ndesc_enable_tx_timestamp(struct dma_desc *p) { - p->des01.tx.time_stamp_enable = 1; + p->des1 |= TDES1_TIME_STAMP_ENABLE; } static int ndesc_get_tx_timestamp_status(struct dma_desc *p) { - return p->des01.tx.time_stamp_status; + return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17; } static u64 ndesc_get_timestamp(void *desc, u32 ats) @@ -258,11 +286,9 @@ const struct stmmac_desc_ops ndesc_ops = { .init_rx_desc = ndesc_init_rx_desc, .init_tx_desc = ndesc_init_tx_desc, .get_tx_owner = ndesc_get_tx_owner, - .get_rx_owner = ndesc_get_rx_owner, .release_tx_desc = ndesc_release_tx_desc, .prepare_tx_desc = ndesc_prepare_tx_desc, - .clear_tx_ic = ndesc_clear_tx_ic, - .close_tx_desc = ndesc_close_tx_desc, + .set_tx_ic = ndesc_set_tx_ic, .get_tx_ls = ndesc_get_tx_ls, .set_tx_owner = ndesc_set_tx_owner, .set_rx_owner = ndesc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 5dd50c6cda5b..7723b5d2499a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -31,8 +31,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int txsize = priv->dma_tx_size; - unsigned int entry = priv->cur_tx % txsize; + unsigned int entry = priv->cur_tx; struct dma_desc *desc; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax, len; @@ -57,12 +56,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = bmax; + priv->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, - STMMAC_RING_MODE); - wmb(); + STMMAC_RING_MODE, 0, false); priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (priv->extend_desc) desc = (struct dma_desc *)(priv->dma_etx + entry); @@ -74,22 +75,27 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_RING_MODE); - wmb(); - priv->hw->desc->set_tx_owner(desc); + STMMAC_RING_MODE, 1, true); } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = nopaged_len; + priv->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, - STMMAC_RING_MODE); + STMMAC_RING_MODE, 0, true); } + priv->cur_tx = entry; + return entry; } @@ -120,7 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p) static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { - if (unlikely(p->des3)) + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + unsigned int entry = priv->dirty_tx; + + /* des3 is only used for jumbo frames tx 
or time stamping */ + if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || + (priv->tx_skbuff_dma[entry].last_segment && + !priv->extend_desc && priv->hwts_tx_en))) p->des3 = 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 1f3b33a6c6a8..8bbab97895fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,7 +24,7 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "March_2013" +#define DRV_MODULE_VERSION "Oct_2015" #include <linux/clk.h> #include <linux/stmmac.h> @@ -45,6 +45,9 @@ struct stmmac_resources { struct stmmac_tx_info { dma_addr_t buf; bool map_as_page; + unsigned len; + bool last_segment; + bool is_jumbo; }; struct stmmac_priv { @@ -54,7 +57,6 @@ struct stmmac_priv { struct sk_buff **tx_skbuff; unsigned int cur_tx; unsigned int dirty_tx; - unsigned int dma_tx_size; u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; @@ -71,8 +73,9 @@ struct stmmac_priv { struct sk_buff **rx_skbuff; unsigned int cur_rx; unsigned int dirty_rx; - unsigned int dma_rx_size; unsigned int dma_buf_sz; + unsigned int rx_copybreak; + unsigned int rx_zeroc_thresh; u32 rx_riwt; int hwts_rx_en; dma_addr_t *rx_skbuff_dma; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 4c6486cc80fb..3c7928edfebb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -97,7 +97,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(napi_poll), STMMAC_STAT(tx_normal_irq_n), STMMAC_STAT(tx_clean), - STMMAC_STAT(tx_reset_ic_bit), + STMMAC_STAT(tx_set_ic_bit), STMMAC_STAT(irq_receive_pmt_irq_n), /* MMC info */ STMMAC_STAT(mmc_tx_irq_n), @@ -781,6 +781,43 @@ static int stmmac_get_ts_info(struct net_device *dev, return ethtool_op_get_ts_info(dev, info); } +static int stmmac_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int stmmac_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, @@ -803,6 +840,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, }; void stmmac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c21015b68097..4c5ce9848ca9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -71,15 +71,8 @@ static int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); -#define DMA_TX_SIZE 256 -static int 
dma_txsize = DMA_TX_SIZE; -module_param(dma_txsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); - -#define DMA_RX_SIZE 256 -static int dma_rxsize = DMA_RX_SIZE; -module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); +#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) +#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) static int flow_ctrl = FLOW_OFF; module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); @@ -99,6 +92,8 @@ static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); +#define STMMAC_RX_COPYBREAK 256 + static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); @@ -134,10 +129,6 @@ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; - if (unlikely(dma_rxsize < 0)) - dma_rxsize = DMA_RX_SIZE; - if (unlikely(dma_txsize < 0)) - dma_txsize = DMA_TX_SIZE; if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) @@ -197,12 +188,28 @@ static void print_pkt(unsigned char *buf, int len) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } -/* minimum number of free TX descriptors required to wake up TX process */ -#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) - static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { - return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; + unsigned avail; + + if (priv->dirty_tx > priv->cur_tx) + avail = priv->dirty_tx - priv->cur_tx - 1; + else + avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; + + return avail; +} + +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) +{ + unsigned dirty; + + if (priv->dirty_rx <= priv->cur_rx) + dirty = priv->cur_rx - priv->dirty_rx; + else + dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; + + return dirty; } /** @@ -862,6 +869,12 @@ static int stmmac_init_phy(struct net_device *dev) phy_disconnect(phydev); return -ENODEV; } + + /* If attached to a switch, there is no reason to poll phy handler */ + if (priv->plat->phy_bus_name) + if (!strcmp(priv->plat->phy_bus_name, "fixed")) + phydev->irq = PHY_IGNORE_INTERRUPT; + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); @@ -906,19 +919,16 @@ static void stmmac_display_ring(void *head, int size, int extend_desc) static void stmmac_display_rings(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; - if (priv->extend_desc) { pr_info("Extended RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); pr_info("Extended TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1); } else { pr_info("RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); pr_info("TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0); } } @@ -947,28 +957,26 @@ static int stmmac_set_bfsize(int mtu, int bufsize) static void stmmac_clear_descriptors(struct stmmac_priv *priv) { int i; - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = 
priv->dma_rx_size; /* Clear the Rx/Tx descriptors */ - for (i = 0; i < rxsize; i++) + for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == rxsize - 1)); + (i == DMA_RX_SIZE - 1)); else priv->hw->desc->init_rx_desc(&priv->dma_rx[i], priv->use_riwt, priv->mode, - (i == rxsize - 1)); - for (i = 0; i < txsize; i++) + (i == DMA_RX_SIZE - 1)); + for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); } /** @@ -1031,8 +1039,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) { int i; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; int ret = -ENOMEM; @@ -1044,10 +1050,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) priv->dma_buf_sz = bfsize; - if (netif_msg_probe(priv)) - pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, - txsize, rxsize, bfsize); - if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1055,7 +1057,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) /* RX INITIALIZATION */ pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); } - for (i = 0; i < rxsize; i++) { + for (i = 0; i < DMA_RX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_erx + i)->basic); @@ -1072,26 +1074,26 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; - priv->dirty_rx = (unsigned int)(i - rxsize); + priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); buf_sz = bfsize; /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) { priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, - rxsize, 1); + DMA_RX_SIZE, 1); priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, - txsize, 1); + DMA_TX_SIZE, 1); } else { priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, - rxsize, 0); + DMA_RX_SIZE, 0); priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, - txsize, 0); + DMA_TX_SIZE, 0); } } /* TX INITIALIZATION */ - for (i = 0; i < txsize; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_etx + i)->basic); @@ -1100,6 +1102,8 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) p->des2 = 0; priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; + priv->tx_skbuff_dma[i].len = 0; + priv->tx_skbuff_dma[i].last_segment = false; priv->tx_skbuff[i] = NULL; } @@ -1123,7 +1127,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) + for (i = 0; i < DMA_RX_SIZE; i++) stmmac_free_rx_buffers(priv, i); } @@ -1131,7 +1135,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_tx_size; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1143,12 +1147,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + 
priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); } @@ -1171,33 +1175,31 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) */ static int alloc_dma_desc_resources(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; int ret = -ENOMEM; - priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), GFP_KERNEL); if (!priv->rx_skbuff_dma) return -ENOMEM; - priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(txsize, + priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, sizeof(*priv->tx_skbuff_dma), GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; - priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skbuff) goto err_tx_skbuff; if (priv->extend_desc) { - priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_rx_phy, @@ -1205,31 +1207,31 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->dma_erx) goto err_dma; - priv->dma_etx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_etx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); goto err_dma; } } else { - priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); if (!priv->dma_rx) goto err_dma; - priv->dma_tx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_tx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); goto err_dma; @@ -1258,16 +1260,16 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) /* Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) { dma_free_coherent(priv->device, - priv->dma_tx_size * sizeof(struct dma_desc), + DMA_TX_SIZE * sizeof(struct dma_desc), priv->dma_tx, priv->dma_tx_phy); dma_free_coherent(priv->device, - priv->dma_rx_size * sizeof(struct dma_desc), + DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); } else { - dma_free_coherent(priv->device, priv->dma_tx_size * + dma_free_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_extended_desc), priv->dma_etx, priv->dma_tx_phy); - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); } @@ -1312,62 +1314,59 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) */ static void stmmac_tx_clean(struct stmmac_priv *priv) { - unsigned int txsize = 
priv->dma_tx_size; unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry = priv->dirty_tx; spin_lock(&priv->tx_lock); priv->xstats.tx_clean++; - while (priv->dirty_tx != priv->cur_tx) { - int last; - unsigned int entry = priv->dirty_tx % txsize; + while (entry != priv->cur_tx) { struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p; + int status; if (priv->extend_desc) p = (struct dma_desc *)(priv->dma_etx + entry); else p = priv->dma_tx + entry; - /* Check if the descriptor is owned by the DMA. */ - if (priv->hw->desc->get_tx_owner(p)) - break; - - /* Verify tx error by looking at the last segment. */ - last = priv->hw->desc->get_tx_ls(p); - if (likely(last)) { - int tx_error = - priv->hw->desc->tx_status(&priv->dev->stats, + status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, priv->ioaddr); - if (likely(tx_error == 0)) { + /* Check if the descriptor is owned by the DMA */ + if (unlikely(status & tx_dma_own)) + break; + + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ + if (unlikely(status & tx_err)) { + priv->dev->stats.tx_errors++; + } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; - } else - priv->dev->stats.tx_errors++; - + } stmmac_get_tx_hwtstamp(priv, entry, skb); } - if (netif_msg_tx_done(priv)) - pr_debug("%s: curr %d, dirty %d\n", __func__, - priv->cur_tx, priv->dirty_tx); if (likely(priv->tx_skbuff_dma[entry].buf)) { if (priv->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry].buf = 0; priv->tx_skbuff_dma[entry].map_as_page = false; } priv->hw->mode->clean_desc3(priv, p); + priv->tx_skbuff_dma[entry].last_segment = false; + priv->tx_skbuff_dma[entry].is_jumbo = false; if (likely(skb != NULL)) { pkts_compl++; @@ -1378,16 +1377,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->hw->desc->release_tx_desc(p, priv->mode); - priv->dirty_tx++; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); } + priv->dirty_tx = entry; netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); if (unlikely(netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { if (netif_msg_tx_done(priv)) pr_debug("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); @@ -1421,20 +1421,19 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) static void stmmac_tx_err(struct stmmac_priv *priv) { int i; - int txsize = priv->dma_tx_size; netif_stop_queue(priv->dev); priv->hw->dma->stop_tx(priv->ioaddr); dma_free_tx_skbufs(priv); - for (i = 0; i < txsize; i++) + for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); priv->dirty_tx = 0; priv->cur_tx = 0; netdev_reset_queue(priv->dev); @@ -1635,23 +1634,35 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) */ static 
int stmmac_init_dma_engine(struct stmmac_priv *priv) { - int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0; int mixed_burst = 0; int atds = 0; + int ret = 0; if (priv->plat->dma_cfg) { pbl = priv->plat->dma_cfg->pbl; fixed_burst = priv->plat->dma_cfg->fixed_burst; mixed_burst = priv->plat->dma_cfg->mixed_burst; - burst_len = priv->plat->dma_cfg->burst_len; + aal = priv->plat->dma_cfg->aal; } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, - burst_len, priv->dma_tx_phy, - priv->dma_rx_phy, atds); + ret = priv->hw->dma->reset(priv->ioaddr); + if (ret) { + dev_err(priv->device, "Failed to reset the dma\n"); + return ret; + } + + priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, + aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); + + if ((priv->synopsys_id >= DWMAC_CORE_3_50) && + (priv->plat->axi && priv->hw->dma->axi)) + priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); + + return ret; } /** @@ -1799,10 +1810,8 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - /* Create and initialize the TX/RX descriptors chains. */ - priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); - priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -1943,13 +1952,12 @@ static int stmmac_release(struct net_device *dev) static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - int entry; + unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int entry, first_entry; struct dma_desc *desc, *first; - unsigned int nopaged_len = skb_headlen(skb); - unsigned int enh_desc = priv->plat->enh_desc; + unsigned int enh_desc; spin_lock(&priv->tx_lock); @@ -1966,31 +1974,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); - entry = priv->cur_tx % txsize; + entry = priv->cur_tx; + first_entry = entry; csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); - if (priv->extend_desc) + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; first = desc; + priv->tx_skbuff[first_entry] = skb; + + enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); - if (likely(!is_jumbo)) { - desc->des2 = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, desc->des2)) - goto dma_map_err; - priv->tx_skbuff_dma[entry].buf = desc->des2; - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, - csum_insertion, priv->mode); - } else { - desc = first; + if (unlikely(is_jumbo)) { entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; @@ -1999,10 +2002,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); - 
priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; - if (priv->extend_desc) + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; @@ -2012,53 +2016,37 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, desc->des2)) goto dma_map_err; /* should reuse desc w/o issues */ + priv->tx_skbuff[entry] = NULL; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].map_as_page = true; + priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].last_segment = last_segment; + + /* Prepare the descriptor and set the own bit too */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode); - wmb(); - priv->hw->desc->set_tx_owner(desc); - wmb(); + priv->mode, 1, last_segment); } - priv->tx_skbuff[entry] = skb; - - /* Finalize the latest segment. */ - priv->hw->desc->close_tx_desc(desc); + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - wmb(); - /* According to the coalesce parameter the IC bit for the latest - * segment could be reset and the timer re-started to invoke the - * stmmac_tx function. This approach takes care about the fragments. - */ - priv->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames > priv->tx_count_frames) { - priv->hw->desc->clear_tx_ic(desc); - priv->xstats.tx_reset_ic_bit++; - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else - priv->tx_count_frames = 0; - - /* To avoid raise condition */ - priv->hw->desc->set_tx_owner(first); - wmb(); - - priv->cur_tx++; + priv->cur_tx = entry; if (netif_msg_pktdata(priv)) { - pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", - __func__, (priv->cur_tx % txsize), - (priv->dirty_tx % txsize), entry, first, nfrags); + pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, priv->cur_tx, priv->dirty_tx, first_entry, + entry, first, nfrags); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, + DMA_TX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, + DMA_TX_SIZE, 0); pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { if (netif_msg_hw(priv)) pr_debug("%s: stop transmitted packets\n", __func__); @@ -2067,16 +2055,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - /* declare that device is doing timestamping */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. 
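The ring indices are no longer derived with a modulo on a runtime-configurable size; cur_tx/dirty_tx (and the RX counterparts) are advanced with STMMAC_GET_ENTRY() against the fixed DMA_TX_SIZE/DMA_RX_SIZE. The helper itself is defined in stmmac.h rather than in the hunks shown here, but given how it is used it is presumably a power-of-two wrap along these lines:

/* Hypothetical sketch of the wrap helper -- assumes the fixed ring
 * sizes that replace the old dma_txsize/dma_rxsize module parameters
 * are powers of two.
 */
#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))
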
+ */ + priv->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + mod_timer(&priv->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer)); + } else { + priv->tx_count_frames = 0; + priv->hw->desc->set_tx_ic(desc); + priv->xstats.tx_set_ic_bit++; } if (!priv->hwts_tx_en) skb_tx_timestamp(skb); + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be + * passed to the DMA engine. + */ + if (likely(!is_jumbo)) { + bool last_segment = (nfrags == 0); + + first->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, first->des2)) + goto dma_map_err; + + priv->tx_skbuff_dma[first_entry].buf = first->des2; + priv->tx_skbuff_dma[first_entry].len = nopaged_len; + priv->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->enable_tx_timestamp(first); + } + + /* Prepare the first descriptor setting the OWN bit too */ + priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, + csum_insertion, priv->mode, 1, + last_segment); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + smp_wmb(); + } + netdev_sent_queue(dev, skb->len); priv->hw->dma->enable_dma_transmission(priv->ioaddr); @@ -2108,6 +2139,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) } +static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) +{ + if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) + return 0; + + return 1; +} + /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -2116,11 +2155,11 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) */ static inline void stmmac_rx_refill(struct stmmac_priv *priv) { - unsigned int rxsize = priv->dma_rx_size; int bfsize = priv->dma_buf_sz; + unsigned int entry = priv->dirty_rx; + int dirty = stmmac_rx_dirty(priv); - for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { - unsigned int entry = priv->dirty_rx % rxsize; + while (dirty-- > 0) { struct dma_desc *p; if (priv->extend_desc) @@ -2132,9 +2171,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); - - if (unlikely(skb == NULL)) + if (unlikely(!skb)) { + /* so for a while no zero-copy! 
*/ + priv->rx_zeroc_thresh = STMMAC_RX_THRESH; + if (unlikely(net_ratelimit())) + dev_err(priv->device, + "fail to alloc skb entry %d\n", + entry); break; + } priv->rx_skbuff[entry] = skb; priv->rx_skbuff_dma[entry] = @@ -2150,13 +2195,20 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->hw->mode->refill_desc3(priv, p); + if (priv->rx_zeroc_thresh > 0) + priv->rx_zeroc_thresh--; + if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); } + wmb(); priv->hw->desc->set_rx_owner(p); wmb(); + + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } + priv->dirty_rx = entry; } /** @@ -2168,8 +2220,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) */ static int stmmac_rx(struct stmmac_priv *priv, int limit) { - unsigned int rxsize = priv->dma_rx_size; - unsigned int entry = priv->cur_rx % rxsize; + unsigned int entry = priv->cur_rx; unsigned int next_entry; unsigned int count = 0; int coe = priv->hw->rx_csum; @@ -2177,9 +2228,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (netif_msg_rx_status(priv)) { pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, + DMA_RX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, + DMA_RX_SIZE, 0); } while (count < limit) { int status; @@ -2190,20 +2243,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) else p = priv->dma_rx + entry; - if (priv->hw->desc->get_rx_owner(p)) + /* read the status of the incoming frame */ + status = priv->hw->desc->rx_status(&priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) break; count++; - next_entry = (++priv->cur_rx) % rxsize; + priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); + next_entry = priv->cur_rx; + if (priv->extend_desc) prefetch(priv->dma_erx + next_entry); else prefetch(priv->dma_rx + next_entry); - /* read the status of the incoming frame */ - status = priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p); if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, &priv->xstats, @@ -2248,23 +2304,54 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) pr_debug("\tframe size %d, COE: %d\n", frame_len, status); } - skb = priv->rx_skbuff[entry]; - if (unlikely(!skb)) { - pr_err("%s: Inconsistent Rx descriptor chain\n", - priv->dev->name); - priv->dev->stats.rx_dropped++; - break; + + if (unlikely((frame_len < priv->rx_copybreak) || + stmmac_rx_threshold_count(priv))) { + skb = netdev_alloc_skb_ip_align(priv->dev, + frame_len); + if (unlikely(!skb)) { + if (net_ratelimit()) + dev_warn(priv->device, + "packet dropped\n"); + priv->dev->stats.rx_dropped++; + break; + } + + dma_sync_single_for_cpu(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, + priv-> + rx_skbuff[entry]->data, + frame_len); + + skb_put(skb, frame_len); + dma_sync_single_for_device(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + } else { + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + priv->rx_zeroc_thresh++; + + skb_put(skb, frame_len); + 
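The branch above implements the new rx_copybreak policy: short frames, and every frame for a while after a refill allocation failure (rx_zeroc_thresh saturated), are copied into a freshly allocated skb so the already-mapped DMA buffer can be recycled immediately, while larger frames continue on the zero-copy path that unmaps the original buffer just below. The threshold defaults to STMMAC_RX_COPYBREAK and is exposed through the ETHTOOL_RX_COPYBREAK tunable added earlier in this diff (with a recent enough ethtool, e.g. "ethtool --set-tunable <iface> rx-copybreak 512"). Restated as a predicate, the decision is roughly:

/* Illustrative restatement of the copy-vs-zero-copy choice made in
 * stmmac_rx(); not a helper that exists in the driver.
 */
static bool stmmac_rx_must_copy(struct stmmac_priv *priv,
				unsigned int frame_len)
{
	return (frame_len < priv->rx_copybreak) ||
	       stmmac_rx_threshold_count(priv);
}
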
dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); } - prefetch(skb->data - NET_IP_ALIGN); - priv->rx_skbuff[entry] = NULL; stmmac_get_rx_hwtstamp(priv, entry, skb); - skb_put(skb, frame_len); - dma_unmap_single(priv->device, - priv->rx_skbuff_dma[entry], - priv->dma_buf_sz, DMA_FROM_DEVICE); - if (netif_msg_pktdata(priv)) { pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); @@ -2555,19 +2642,17 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; if (priv->extend_desc) { seq_printf(seq, "Extended RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq); + sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); seq_printf(seq, "Extended TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq); + sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); } else { seq_printf(seq, "RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq); + sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); seq_printf(seq, "TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq); + sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); } return 0; @@ -3137,12 +3222,6 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "phyaddr:", 8)) { if (kstrtoint(opt + 8, 0, &phyaddr)) goto err; - } else if (!strncmp(opt, "dma_txsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_txsize)) - goto err; - } else if (!strncmp(opt, "dma_rxsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_rxsize)) - goto err; } else if (!strncmp(opt, "buf_sz:", 7)) { if (kstrtoint(opt + 7, 0, &buf_sz)) goto err; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0faf16336035..ea76129dafc2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev) struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; - struct device_node *mdio_node = NULL; - struct device_node *child_node = NULL; + struct device_node *mdio_node = priv->plat->mdio_node; if (!mdio_bus_data) return 0; if (IS_ENABLED(CONFIG_OF)) { - for_each_child_of_node(priv->device->of_node, child_node) { - if (of_device_is_compatible(child_node, - "snps,dwmac-mdio")) { - mdio_node = child_node; - break; - } - } - if (mdio_node) { netdev_dbg(ndev, "FOUND MDIO subnode\n"); } else { @@ -252,6 +243,9 @@ int stmmac_mdio_register(struct net_device *ndev) goto bus_register_fail; } + if (priv->plat->phy_node || mdio_node) + goto bus_register_done; + found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); @@ -307,6 +301,7 @@ int stmmac_mdio_register(struct net_device *ndev) return -ENODEV; } +bus_register_done: priv->mii = new_bus; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index d71a721ea61c..ae4388735b7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -81,7 +81,7 @@ static void 
stmmac_default_data(struct plat_stmmacenet_data *plat) plat->mdio_bus_data->phy_mask = 0; plat->dma_cfg->pbl = 32; - plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; + /* TODO: AXI */ /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -115,8 +115,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat, plat->mdio_bus_data->phy_mask = 0; plat->dma_cfg->pbl = 16; - plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6a52fa18cbf2..dcbd2a1601e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -96,6 +96,42 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) } /** + * stmmac_axi_setup - parse DT parameters for programming the AXI register + * @pdev: platform device + * @priv: driver private struct. + * Description: + * if required, from device-tree the AXI internal register can be tuned + * by using platform parameters. + */ +static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) +{ + struct device_node *np; + struct stmmac_axi *axi; + + np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0); + if (!np) + return NULL; + + axi = kzalloc(sizeof(*axi), GFP_KERNEL); + if (!axi) + return ERR_PTR(-ENOMEM); + + axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); + axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); + axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); + axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); + axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); + axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); + axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); + + of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); + of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); + of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + + return axi; +} + +/** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure * @plat: driver data platform structure @@ -110,6 +146,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + struct device_node *child_node = NULL; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -140,13 +177,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->phy_node = of_node_get(np); } + for_each_child_of_node(np, child_node) + if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) { + plat->mdio_node = child_node; + break; + } + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. 
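One consequence of moving the "snps,dwmac-mdio" lookup into stmmac_probe_config_dt() (a few lines above) is that stmmac_mdio_register(), whose hunks appear earlier in this diff, now receives the subnode via plat->mdio_node and can skip the manual PHY scan. The bus registration itself is outside the hunks shown here, but with the usual OF MDIO helpers it presumably ends up as something like:

/* Sketch only -- new_bus is the mii_bus allocated in
 * stmmac_mdio_register(); both helpers are standard kernel API.
 */
if (mdio_node)
	err = of_mdiobus_register(new_bus, mdio_node);
else
	err = mdiobus_register(new_bus);
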
*/ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) + if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = @@ -216,13 +259,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->aal = of_property_read_bool(np, "snps,aal"); dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); - of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len); - if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256) - dma_cfg->burst_len = 0; } plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); if (plat->force_thresh_dma_mode) { @@ -230,6 +271,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } + plat->axi = stmmac_axi_setup(pdev); + return plat; } #else diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index dee94b67638c..a4b40e3015e5 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -69,12 +69,28 @@ config CASSINI Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>. +config SUNVNET_COMMON + bool + depends on SUN_LDOMS + default y if SUN_LDOMS + config SUNVNET tristate "Sun Virtual Network support" depends on SUN_LDOMS ---help--- Support for virtual network devices under Sun Logical Domains. +config LDMVSW + tristate "Sun4v LDoms Virtual Switch support" + depends on SUN_LDOMS + ---help--- + Support for virtual switch devices under Sun4v Logical Domains. + This driver adds a network interface for every vsw-port node + found in the machine description of a service domain. + Linux bridge/switch software can use these interfaces for + guest domain network interconnectivity or guest domain + connection to a physical network on a service domain. + config NIU tristate "Sun Neptune 10Gbit Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile index 1e620ff88eba..37855438b3cb 100644 --- a/drivers/net/ethernet/sun/Makefile +++ b/drivers/net/ethernet/sun/Makefile @@ -7,5 +7,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_SUNGEM) += sungem.o obj-$(CONFIG_CASSINI) += cassini.o +obj-$(CONFIG_SUNVNET_COMMON) += sunvnet_common.o obj-$(CONFIG_SUNVNET) += sunvnet.o +obj-$(CONFIG_LDMVSW) += ldmvsw.o obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c new file mode 100644 index 000000000000..e15bf84fc6b2 --- /dev/null +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -0,0 +1,468 @@ +/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. + * + * Copyright (C) 2016 Oracle. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/highmem.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/slab.h> +#include <linux/types.h> + +#if defined(CONFIG_IPV6) +#include <linux/icmpv6.h> +#endif + +#include <net/ip.h> +#include <net/icmp.h> +#include <net/route.h> + +#include <asm/vio.h> +#include <asm/ldc.h> + +/* This driver makes use of the common code in sunvnet_common.c */ +#include "sunvnet_common.h" + +/* Length of time before we decide the hardware is hung, + * and dev->tx_timeout() should be called to fix the problem. + */ +#define VSW_TX_TIMEOUT (10 * HZ) + +/* Static HW Addr used for the network interfaces representing vsw ports */ +static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + +#define DRV_MODULE_NAME "ldmvsw" +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "Jan 15, 2016" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("Oracle"); +MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* Ordered from largest major to lowest */ +static struct vio_version vsw_versions[] = { + { .major = 1, .minor = 8 }, + { .major = 1, .minor = 7 }, + { .major = 1, .minor = 6 }, + { .major = 1, .minor = 0 }, +}; + +static void vsw_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static u32 vsw_get_msglevel(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return port->vp->msg_enable; +} + +static void vsw_set_msglevel(struct net_device *dev, u32 value) +{ + struct vnet_port *port = netdev_priv(dev); + + port->vp->msg_enable = value; +} + +static const struct ethtool_ops vsw_ethtool_ops = { + .get_drvinfo = vsw_get_drvinfo, + .get_msglevel = vsw_get_msglevel, + .set_msglevel = vsw_set_msglevel, + .get_link = ethtool_op_get_link, +}; + +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +/* func arg to vnet_start_xmit_common() to get the proper tx port */ +static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, + struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return port; +} + +static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet_port *port = netdev_priv(dev); + + if (!port) + return 0; + + return port->q_index; +} + +/* Wrappers to common functions */ +static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); +} + +static void vsw_set_rx_mode(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return sunvnet_set_rx_mode_common(dev, port->vp); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vsw_poll_controller(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return sunvnet_poll_controller_common(dev, port->vp); +} +#endif + +static const struct net_device_ops vsw_ops = { + .ndo_open = sunvnet_open_common, + .ndo_stop = sunvnet_close_common, + .ndo_set_rx_mode = vsw_set_rx_mode, + 
.ndo_set_mac_address = sunvnet_set_mac_addr_common, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = sunvnet_tx_timeout_common, + .ndo_change_mtu = sunvnet_change_mtu_common, + .ndo_start_xmit = vsw_start_xmit, + .ndo_select_queue = vsw_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vsw_poll_controller, +#endif +}; + +static const char *local_mac_prop = "local-mac-address"; +static const char *cfg_handle_prop = "cfg-handle"; + +static struct vnet *vsw_get_vnet(struct mdesc_handle *hp, + u64 port_node, + u64 *handle) +{ + struct vnet *vp; + struct vnet *iter; + const u64 *local_mac = NULL; + const u64 *cfghandle = NULL; + u64 a; + + /* Get the parent virtual-network-switch macaddr and cfghandle */ + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "virtual-network-switch")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + cfghandle = mdesc_get_property(hp, target, + cfg_handle_prop, NULL); + break; + } + if (!local_mac || !cfghandle) + return ERR_PTR(-ENODEV); + + /* find or create associated vnet */ + vp = NULL; + mutex_lock(&vnet_list_mutex); + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + + if (!vp) { + vp = kzalloc(sizeof(*vp), GFP_KERNEL); + if (unlikely(!vp)) { + mutex_unlock(&vnet_list_mutex); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&vp->lock); + INIT_LIST_HEAD(&vp->port_list); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + list_add(&vp->list, &vnet_list); + } + + mutex_unlock(&vnet_list_mutex); + + *handle = (u64)*cfghandle; + + return vp; +} + +static struct net_device *vsw_alloc_netdev(u8 hwaddr[], + struct vio_dev *vdev, + u64 handle, + u64 port_id) +{ + struct net_device *dev; + struct vnet_port *port; + int i; + + dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1); + if (!dev) + return ERR_PTR(-ENOMEM); + dev->needed_headroom = VNET_PACKET_SKIP + 8; + dev->needed_tailroom = 8; + + for (i = 0; i < ETH_ALEN; i++) { + dev->dev_addr[i] = hwaddr[i]; + dev->perm_addr[i] = dev->dev_addr[i]; + } + + sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id); + + dev->netdev_ops = &vsw_ops; + dev->ethtool_ops = &vsw_ethtool_ops; + dev->watchdog_timeo = VSW_TX_TIMEOUT; + + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + + SET_NETDEV_DEV(dev, &vdev->dev); + + return dev; +} + +static struct ldc_channel_config vsw_ldc_cfg = { + .event = sunvnet_event_common, + .mtu = 64, + .mode = LDC_MODE_UNRELIABLE, +}; + +static struct vio_driver_ops vsw_vio_ops = { + .send_attr = sunvnet_send_attr_common, + .handle_attr = sunvnet_handle_attr_common, + .handshake_complete = sunvnet_handshake_complete_common, +}; + +static void print_version(void) +{ + printk_once(KERN_INFO "%s", version); +} + +static const char *remote_macaddr_prop = "remote-mac-address"; +static const char *id_prop = "id"; + +static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vnet_port *port; + unsigned long flags; + struct vnet *vp; + struct net_device *dev; + const u64 *rmac; + int len, i, err; + const u64 *port_id; + u64 handle; + + print_version(); + + hp = mdesc_grab(); + + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); + 
err = -ENODEV; + if (!rmac) { + pr_err("Port lacks %s property\n", remote_macaddr_prop); + mdesc_release(hp); + return err; + } + + port_id = mdesc_get_property(hp, vdev->mp, id_prop, NULL); + err = -ENODEV; + if (!port_id) { + pr_err("Port lacks %s property\n", id_prop); + mdesc_release(hp); + return err; + } + + /* Get (or create) the vnet associated with this port */ + vp = vsw_get_vnet(hp, vdev->mp, &handle); + if (unlikely(IS_ERR(vp))) { + err = PTR_ERR(vp); + pr_err("Failed to get vnet for vsw-port\n"); + mdesc_release(hp); + return err; + } + + mdesc_release(hp); + + dev = vsw_alloc_netdev(vsw_port_hwaddr, vdev, handle, *port_id); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + pr_err("Failed to alloc netdev for vsw-port\n"); + return err; + } + + port = netdev_priv(dev); + + INIT_LIST_HEAD(&port->list); + + for (i = 0; i < ETH_ALEN; i++) + port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; + + port->vp = vp; + port->dev = dev; + port->switch_port = 1; + port->tso = true; + port->tsolen = 0; + + /* Mark the port as belonging to ldmvsw which directs the + * the common code to use the net_device in the vnet_port + * rather than the net_device in the vnet (which is used + * by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE + * macro. + */ + port->vsw = 1; + + err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, + vsw_versions, ARRAY_SIZE(vsw_versions), + &vsw_vio_ops, dev->name); + if (err) + goto err_out_free_dev; + + err = vio_ldc_alloc(&port->vio, &vsw_ldc_cfg, port); + if (err) + goto err_out_free_dev; + + dev_set_drvdata(&vdev->dev, port); + + netif_napi_add(dev, &port->napi, sunvnet_poll_common, + NAPI_POLL_WEIGHT); + + spin_lock_irqsave(&vp->lock, flags); + list_add_rcu(&port->list, &vp->port_list); + spin_unlock_irqrestore(&vp->lock, flags); + + setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, + (unsigned long)port); + + err = register_netdev(dev); + if (err) { + pr_err("Cannot register net device, aborting\n"); + goto err_out_del_timer; + } + + spin_lock_irqsave(&vp->lock, flags); + sunvnet_port_add_txq_common(port); + spin_unlock_irqrestore(&vp->lock, flags); + + napi_enable(&port->napi); + vio_port_up(&port->vio); + + netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr); + + pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name, + port->raddr, " switch-port"); + + return 0; + +err_out_del_timer: + del_timer_sync(&port->clean_timer); + list_del_rcu(&port->list); + synchronize_rcu(); + netif_napi_del(&port->napi); + dev_set_drvdata(&vdev->dev, NULL); + vio_ldc_free(&port->vio); + +err_out_free_dev: + free_netdev(dev); + return err; +} + +static int vsw_port_remove(struct vio_dev *vdev) +{ + struct vnet_port *port = dev_get_drvdata(&vdev->dev); + unsigned long flags; + + if (port) { + del_timer_sync(&port->vio.timer); + + napi_disable(&port->napi); + + list_del_rcu(&port->list); + + synchronize_rcu(); + del_timer_sync(&port->clean_timer); + spin_lock_irqsave(&port->vp->lock, flags); + sunvnet_port_rm_txq_common(port); + spin_unlock_irqrestore(&port->vp->lock, flags); + netif_napi_del(&port->napi); + sunvnet_port_free_tx_bufs_common(port); + vio_ldc_free(&port->vio); + + dev_set_drvdata(&vdev->dev, NULL); + + unregister_netdev(port->dev); + free_netdev(port->dev); + } + + return 0; +} + +static void vsw_cleanup(void) +{ + struct vnet *vp; + + /* just need to free up the vnet list */ + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + /* 
vio_unregister_driver() should have cleaned up port_list */ + if (!list_empty(&vp->port_list)) + pr_err("Ports not removed by VIO subsystem!\n"); + kfree(vp); + } + mutex_unlock(&vnet_list_mutex); +} + +static const struct vio_device_id vsw_port_match[] = { + { + .type = "vsw-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vsw_port_match); + +static struct vio_driver vsw_port_driver = { + .id_table = vsw_port_match, + .probe = vsw_port_probe, + .remove = vsw_port_remove, + .name = "vsw_port", +}; + +static int __init vsw_init(void) +{ + return vio_register_driver(&vsw_port_driver); +} + +static void __exit vsw_exit(void) +{ + vio_unregister_driver(&vsw_port_driver); + vsw_cleanup(); +} + +module_init(vsw_init); +module_exit(vsw_exit); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index ab6051a43134..9cc45649f477 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3341,7 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, niu_hash_page(rp, page, addr); if (rp->rbr_blocks_per_page > 1) - atomic_add(rp->rbr_blocks_per_page - 1, &page->_count); + page_ref_add(page, rp->rbr_blocks_per_page - 1); for (i = 0; i < rp->rbr_blocks_per_page; i++) { __le32 *rbr = &rp->rbr[start_index + i]; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index e23a642357e7..2437227712dc 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -51,7 +51,6 @@ #endif #ifdef CONFIG_PPC_PMAC -#include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23fa29877f5b..a2f9b47de187 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1,6 +1,7 @@ /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> + * Copyright (C) 2016 Oracle. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -29,7 +30,12 @@ #include <asm/vio.h> #include <asm/ldc.h> -#include "sunvnet.h" +#include "sunvnet_common.h" + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define VNET_TX_TIMEOUT (5 * HZ) #define DRV_MODULE_NAME "sunvnet" #define DRV_MODULE_VERSION "1.0" @@ -42,16 +48,6 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -#define VNET_MAX_TXQS 16 - -/* Heuristic for the number of times to exponentially backoff and - * retry sending an LDC trigger when EAGAIN is encountered - */ -#define VNET_MAX_RETRIES 10 - -static int __vnet_tx_trigger(struct vnet_port *port, u32 start); -static void vnet_port_reset(struct vnet_port *port); - /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 8 }, @@ -60,866 +56,45 @@ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 0 }, }; -static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) -{ - return vio_dring_avail(dr, VNET_TX_RING_SIZE); -} - -static int vnet_handle_unknown(struct vnet_port *port, void *arg) -{ - struct vio_msg_tag *pkt = arg; - - pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", - pkt->type, pkt->stype, pkt->stype_env, pkt->sid); - pr_err("Resetting connection\n"); - - ldc_disconnect(port->vio.lp); - - return -ECONNRESET; -} - -static int vnet_port_alloc_tx_ring(struct vnet_port *port); - -static int vnet_send_attr(struct vio_driver_state *vio) -{ - struct vnet_port *port = to_vnet_port(vio); - struct net_device *dev = port->vp->dev; - struct vio_net_attr_info pkt; - int framelen = ETH_FRAME_LEN; - int i, err; - - err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); - if (err) - return err; - - memset(&pkt, 0, sizeof(pkt)); - pkt.tag.type = VIO_TYPE_CTRL; - pkt.tag.stype = VIO_SUBTYPE_INFO; - pkt.tag.stype_env = VIO_ATTR_INFO; - pkt.tag.sid = vio_send_sid(vio); - if (vio_version_before(vio, 1, 2)) - pkt.xfer_mode = VIO_DRING_MODE; - else - pkt.xfer_mode = VIO_NEW_DRING_MODE; - pkt.addr_type = VNET_ADDR_ETHERMAC; - pkt.ack_freq = 0; - for (i = 0; i < 6; i++) - pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); - if (vio_version_after(vio, 1, 3)) { - if (port->rmtu) { - port->rmtu = min(VNET_MAXPACKET, port->rmtu); - pkt.mtu = port->rmtu; - } else { - port->rmtu = VNET_MAXPACKET; - pkt.mtu = port->rmtu; - } - if (vio_version_after_eq(vio, 1, 6)) - pkt.options = VIO_TX_DRING; - } else if (vio_version_before(vio, 1, 3)) { - pkt.mtu = framelen; - } else { /* v1.3 */ - pkt.mtu = framelen + VLAN_HLEN; - } - - pkt.cflags = 0; - if (vio_version_after_eq(vio, 1, 7) && port->tso) { - pkt.cflags |= VNET_LSO_IPV4_CAPAB; - if (!port->tsolen) - port->tsolen = VNET_MAXTSO; - pkt.ipv4_lso_maxlen = port->tsolen; - } - - pkt.plnk_updt = PHYSLINK_UPDATE_NONE; - - viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " - "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " - "cflags[0x%04x] lso_max[%u]\n", - pkt.xfer_mode, pkt.addr_type, - (unsigned long long)pkt.addr, - pkt.ack_freq, pkt.plnk_updt, pkt.options, - (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); - - - return vio_ldc_send(vio, &pkt, sizeof(pkt)); -} - -static int handle_attr_info(struct vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - struct vnet_port *port = to_vnet_port(vio); - u64 localmtu; - u8 xfer_mode; - - viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " - 
"ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " - " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", - pkt->xfer_mode, pkt->addr_type, - (unsigned long long)pkt->addr, - pkt->ack_freq, pkt->plnk_updt, pkt->options, - (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, - pkt->ipv4_lso_maxlen); - - pkt->tag.sid = vio_send_sid(vio); - - xfer_mode = pkt->xfer_mode; - /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ - if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) - xfer_mode = VIO_NEW_DRING_MODE; - - /* MTU negotiation: - * < v1.3 - ETH_FRAME_LEN exactly - * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change - * pkt->mtu for ACK - * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly - */ - if (vio_version_before(vio, 1, 3)) { - localmtu = ETH_FRAME_LEN; - } else if (vio_version_after(vio, 1, 3)) { - localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; - localmtu = min(pkt->mtu, localmtu); - pkt->mtu = localmtu; - } else { /* v1.3 */ - localmtu = ETH_FRAME_LEN + VLAN_HLEN; - } - port->rmtu = localmtu; - - /* LSO negotiation */ - if (vio_version_after_eq(vio, 1, 7)) - port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); - else - port->tso = false; - if (port->tso) { - if (!port->tsolen) - port->tsolen = VNET_MAXTSO; - port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); - if (port->tsolen < VNET_MINTSO) { - port->tso = false; - port->tsolen = 0; - pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; - } - pkt->ipv4_lso_maxlen = port->tsolen; - } else { - pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; - pkt->ipv4_lso_maxlen = 0; - } - - /* for version >= 1.6, ACK packet mode we support */ - if (vio_version_after_eq(vio, 1, 6)) { - pkt->xfer_mode = VIO_NEW_DRING_MODE; - pkt->options = VIO_TX_DRING; - } - - if (!(xfer_mode | VIO_NEW_DRING_MODE) || - pkt->addr_type != VNET_ADDR_ETHERMAC || - pkt->mtu != localmtu) { - viodbg(HS, "SEND NET ATTR NACK\n"); - - pkt->tag.stype = VIO_SUBTYPE_NACK; - - (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); - - return -ECONNRESET; - } else { - viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " - "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " - "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", - pkt->xfer_mode, pkt->addr_type, - (unsigned long long)pkt->addr, - pkt->ack_freq, pkt->plnk_updt, pkt->options, - (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, - pkt->ipv4_lso_maxlen); - - pkt->tag.stype = VIO_SUBTYPE_ACK; - - return vio_ldc_send(vio, pkt, sizeof(*pkt)); - } - -} - -static int handle_attr_ack(struct vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - viodbg(HS, "GOT NET ATTR ACK\n"); - - return 0; -} - -static int handle_attr_nack(struct vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - viodbg(HS, "GOT NET ATTR NACK\n"); - - return -ECONNRESET; -} - -static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) -{ - struct vio_net_attr_info *pkt = arg; - - switch (pkt->tag.stype) { - case VIO_SUBTYPE_INFO: - return handle_attr_info(vio, pkt); - - case VIO_SUBTYPE_ACK: - return handle_attr_ack(vio, pkt); - - case VIO_SUBTYPE_NACK: - return handle_attr_nack(vio, pkt); - - default: - return -ECONNRESET; - } -} - -static void vnet_handshake_complete(struct vio_driver_state *vio) -{ - struct vio_dring_state *dr; - - dr = &vio->drings[VIO_DRIVER_RX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; - - dr = &vio->drings[VIO_DRIVER_TX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; -} - -/* The hypervisor interface that implements copying to/from imported - * memory from another domain requires that copies are done 
to 8-byte - * aligned buffers, and that the lengths of such copies are also 8-byte - * multiples. - * - * So we align skb->data to an 8-byte multiple and pad-out the data - * area so we can round the copy length up to the next multiple of - * 8 for the copy. - * - * The transmitter puts the actual start of the packet 6 bytes into - * the buffer it sends over, so that the IP headers after the ethernet - * header are aligned properly. These 6 bytes are not in the descriptor - * length, they are simply implied. This offset is represented using - * the VNET_PACKET_SKIP macro. - */ -static struct sk_buff *alloc_and_align_skb(struct net_device *dev, - unsigned int len) -{ - struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); - unsigned long addr, off; - - if (unlikely(!skb)) - return NULL; - - addr = (unsigned long) skb->data; - off = ((addr + 7UL) & ~7UL) - addr; - if (off) - skb_reserve(skb, off); - - return skb; -} - -static inline void vnet_fullcsum(struct sk_buff *skb) -{ - struct iphdr *iph = ip_hdr(skb); - int offset = skb_transport_offset(skb); - - if (skb->protocol != htons(ETH_P_IP)) - return; - if (iph->protocol != IPPROTO_TCP && - iph->protocol != IPPROTO_UDP) - return; - skb->ip_summed = CHECKSUM_NONE; - skb->csum_level = 1; - skb->csum = 0; - if (iph->protocol == IPPROTO_TCP) { - struct tcphdr *ptcp = tcp_hdr(skb); - - ptcp->check = 0; - skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); - ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - offset, IPPROTO_TCP, - skb->csum); - } else if (iph->protocol == IPPROTO_UDP) { - struct udphdr *pudp = udp_hdr(skb); - - pudp->check = 0; - skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); - pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - offset, IPPROTO_UDP, - skb->csum); - } -} - -static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) -{ - struct net_device *dev = port->vp->dev; - unsigned int len = desc->size; - unsigned int copy_len; - struct sk_buff *skb; - int maxlen; - int err; - - err = -EMSGSIZE; - if (port->tso && port->tsolen > port->rmtu) - maxlen = port->tsolen; - else - maxlen = port->rmtu; - if (unlikely(len < ETH_ZLEN || len > maxlen)) { - dev->stats.rx_length_errors++; - goto out_dropped; - } - - skb = alloc_and_align_skb(dev, len); - err = -ENOMEM; - if (unlikely(!skb)) { - dev->stats.rx_missed_errors++; - goto out_dropped; - } - - copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; - skb_put(skb, copy_len); - err = ldc_copy(port->vio.lp, LDC_COPY_IN, - skb->data, copy_len, 0, - desc->cookies, desc->ncookies); - if (unlikely(err < 0)) { - dev->stats.rx_frame_errors++; - goto out_free_skb; - } - - skb_pull(skb, VNET_PACKET_SKIP); - skb_trim(skb, len); - skb->protocol = eth_type_trans(skb, dev); - - if (vio_version_after_eq(&port->vio, 1, 8)) { - struct vio_net_dext *dext = vio_net_ext(desc); - - skb_reset_network_header(skb); - - if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { - if (skb->protocol == ETH_P_IP) { - struct iphdr *iph = ip_hdr(skb); - - iph->check = 0; - ip_send_check(iph); - } - } - if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && - skb->ip_summed == CHECKSUM_NONE) { - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - int ihl = iph->ihl * 4; - - skb_reset_transport_header(skb); - skb_set_transport_header(skb, ihl); - vnet_fullcsum(skb); - } - } - if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { - skb->ip_summed = CHECKSUM_PARTIAL; - skb->csum_level = 0; - if (dext->flags & 
VNET_PKT_HCK_FULLCKSUM_OK) - skb->csum_level = 1; - } - } - - skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; - - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - napi_gro_receive(&port->napi, skb); - return 0; - -out_free_skb: - kfree_skb(skb); - -out_dropped: - dev->stats.rx_dropped++; - return err; -} - -static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end, u8 vio_dring_state) -{ - struct vio_dring_data hdr = { - .tag = { - .type = VIO_TYPE_DATA, - .stype = VIO_SUBTYPE_ACK, - .stype_env = VIO_DRING_DATA, - .sid = vio_send_sid(&port->vio), - }, - .dring_ident = dr->ident, - .start_idx = start, - .end_idx = end, - .state = vio_dring_state, - }; - int err, delay; - int retries = 0; - - hdr.seq = dr->snd_nxt; - delay = 1; - do { - err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); - if (err > 0) { - dr->snd_nxt++; - break; - } - udelay(delay); - if ((delay <<= 1) > 128) - delay = 128; - if (retries++ > VNET_MAX_RETRIES) { - pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", - port->raddr[0], port->raddr[1], - port->raddr[2], port->raddr[3], - port->raddr[4], port->raddr[5]); - break; - } - } while (err == -EAGAIN); - - if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { - port->stop_rx_idx = end; - port->stop_rx = true; - } else { - port->stop_rx_idx = 0; - port->stop_rx = false; - } - - return err; -} - -static struct vio_net_desc *get_rx_desc(struct vnet_port *port, - struct vio_dring_state *dr, - u32 index) -{ - struct vio_net_desc *desc = port->vio.desc_buf; - int err; - - err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, - (index * dr->entry_size), - dr->cookies, dr->ncookies); - if (err < 0) - return ERR_PTR(err); - - return desc; -} - -static int put_rx_desc(struct vnet_port *port, - struct vio_dring_state *dr, - struct vio_net_desc *desc, - u32 index) -{ - int err; - - err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, - (index * dr->entry_size), - dr->cookies, dr->ncookies); - if (err < 0) - return err; - - return 0; -} - -static int vnet_walk_rx_one(struct vnet_port *port, - struct vio_dring_state *dr, - u32 index, int *needs_ack) -{ - struct vio_net_desc *desc = get_rx_desc(port, dr, index); - struct vio_driver_state *vio = &port->vio; - int err; - - BUG_ON(desc == NULL); - if (IS_ERR(desc)) - return PTR_ERR(desc); - - if (desc->hdr.state != VIO_DESC_READY) - return 1; - - dma_rmb(); - - viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", - desc->hdr.state, desc->hdr.ack, - desc->size, desc->ncookies, - desc->cookies[0].cookie_addr, - desc->cookies[0].cookie_size); - - err = vnet_rx_one(port, desc); - if (err == -ECONNRESET) - return err; - desc->hdr.state = VIO_DESC_DONE; - err = put_rx_desc(port, dr, desc, index); - if (err < 0) - return err; - *needs_ack = desc->hdr.ack; - return 0; -} - -static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end, int *npkts, int budget) -{ - struct vio_driver_state *vio = &port->vio; - int ack_start = -1, ack_end = -1; - bool send_ack = true; - - end = (end == (u32) -1) ? 
vio_dring_prev(dr, start) - : vio_dring_next(dr, end); - - viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); - - while (start != end) { - int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); - if (err == -ECONNRESET) - return err; - if (err != 0) - break; - (*npkts)++; - if (ack_start == -1) - ack_start = start; - ack_end = start; - start = vio_dring_next(dr, start); - if (ack && start != end) { - err = vnet_send_ack(port, dr, ack_start, ack_end, - VIO_DRING_ACTIVE); - if (err == -ECONNRESET) - return err; - ack_start = -1; - } - if ((*npkts) >= budget) { - send_ack = false; - break; - } - } - if (unlikely(ack_start == -1)) - ack_start = ack_end = vio_dring_prev(dr, start); - if (send_ack) { - port->napi_resume = false; - return vnet_send_ack(port, dr, ack_start, ack_end, - VIO_DRING_STOPPED); - } else { - port->napi_resume = true; - port->napi_stop_idx = ack_end; - return 1; - } -} - -static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, - int budget) -{ - struct vio_dring_data *pkt = msgbuf; - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; - struct vio_driver_state *vio = &port->vio; - - viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", - pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); - - if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) - return 0; - if (unlikely(pkt->seq != dr->rcv_nxt)) { - pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", - pkt->seq, dr->rcv_nxt); - return 0; - } - - if (!port->napi_resume) - dr->rcv_nxt++; - - /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ - - return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, - npkts, budget); -} - -static int idx_is_pending(struct vio_dring_state *dr, u32 end) -{ - u32 idx = dr->cons; - int found = 0; - - while (idx != dr->prod) { - if (idx == end) { - found = 1; - break; - } - idx = vio_dring_next(dr, idx); - } - return found; -} - -static int vnet_ack(struct vnet_port *port, void *msgbuf) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct vio_dring_data *pkt = msgbuf; - struct net_device *dev; - struct vnet *vp; - u32 end; - struct vio_net_desc *desc; - struct netdev_queue *txq; - - if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) - return 0; - - end = pkt->end_idx; - vp = port->vp; - dev = vp->dev; - netif_tx_lock(dev); - if (unlikely(!idx_is_pending(dr, end))) { - netif_tx_unlock(dev); - return 0; - } - - /* sync for race conditions with vnet_start_xmit() and tell xmit it - * is time to send a trigger. - */ - dr->cons = vio_dring_next(dr, end); - desc = vio_dring_entry(dr, dr->cons); - if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { - /* vnet_start_xmit() just populated this dring but missed - * sending the "start" LDC message to the consumer. - * Send a "start" trigger on its behalf. 
- */ - if (__vnet_tx_trigger(port, dr->cons) > 0) - port->start_cons = false; - else - port->start_cons = true; - } else { - port->start_cons = true; - } - netif_tx_unlock(dev); - - txq = netdev_get_tx_queue(dev, port->q_index); - if (unlikely(netif_tx_queue_stopped(txq) && - vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) - return 1; - - return 0; -} - -static int vnet_nack(struct vnet_port *port, void *msgbuf) -{ - /* XXX just reset or similar XXX */ - return 0; -} - -static int handle_mcast(struct vnet_port *port, void *msgbuf) -{ - struct vio_net_mcast_info *pkt = msgbuf; - - if (pkt->tag.stype != VIO_SUBTYPE_ACK) - pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", - port->vp->dev->name, - pkt->tag.type, - pkt->tag.stype, - pkt->tag.stype_env, - pkt->tag.sid); - - return 0; -} - -/* Got back a STOPPED LDC message on port. If the queue is stopped, - * wake it up so that we'll send out another START message at the - * next TX. - */ -static void maybe_tx_wakeup(struct vnet_port *port) -{ - struct netdev_queue *txq; - - txq = netdev_get_tx_queue(port->vp->dev, port->q_index); - __netif_tx_lock(txq, smp_processor_id()); - if (likely(netif_tx_queue_stopped(txq))) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - netif_tx_wake_queue(txq); - } - __netif_tx_unlock(txq); -} - -static inline bool port_is_up(struct vnet_port *vnet) +static void vnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { - struct vio_driver_state *vio = &vnet->vio; - - return !!(vio->hs_state & VIO_HS_COMPLETE); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } -static int vnet_event_napi(struct vnet_port *port, int budget) +static u32 vnet_get_msglevel(struct net_device *dev) { - struct vio_driver_state *vio = &port->vio; - int tx_wakeup, err; - int npkts = 0; - int event = (port->rx_event & LDC_EVENT_RESET); - -ldc_ctrl: - if (unlikely(event == LDC_EVENT_RESET || - event == LDC_EVENT_UP)) { - vio_link_state_change(vio, event); - - if (event == LDC_EVENT_RESET) { - vnet_port_reset(port); - vio_port_up(vio); - } - port->rx_event = 0; - return 0; - } - /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ - event = (port->rx_event & LDC_EVENT_UP); - port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); - if (event == LDC_EVENT_UP) - goto ldc_ctrl; - event = port->rx_event; - if (!(event & LDC_EVENT_DATA_READY)) - return 0; - - /* we dont expect any other bits than RESET, UP, DATA_READY */ - BUG_ON(event != LDC_EVENT_DATA_READY); - - tx_wakeup = err = 0; - while (1) { - union { - struct vio_msg_tag tag; - u64 raw[8]; - } msgbuf; - - if (port->napi_resume) { - struct vio_dring_data *pkt = - (struct vio_dring_data *)&msgbuf; - struct vio_dring_state *dr = - &port->vio.drings[VIO_DRIVER_RX_RING]; - - pkt->tag.type = VIO_TYPE_DATA; - pkt->tag.stype = VIO_SUBTYPE_INFO; - pkt->tag.stype_env = VIO_DRING_DATA; - pkt->seq = dr->rcv_nxt; - pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); - pkt->end_idx = -1; - goto napi_resume; - } - err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); - if (unlikely(err < 0)) { - if (err == -ECONNRESET) - vio_conn_reset(vio); - break; - } - if (err == 0) - break; - viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", - msgbuf.tag.type, - msgbuf.tag.stype, - msgbuf.tag.stype_env, - msgbuf.tag.sid); - err = vio_validate_sid(vio, &msgbuf.tag); - if (err < 0) - break; -napi_resume: - if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { - if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { - if (!port_is_up(port)) { - /* failures like handshake_failure() - * may have cleaned up dring, but - * NAPI polling may bring us here. - */ - err = -ECONNRESET; - break; - } - err = vnet_rx(port, &msgbuf, &npkts, budget); - if (npkts >= budget) - break; - if (npkts == 0) - break; - } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { - err = vnet_ack(port, &msgbuf); - if (err > 0) - tx_wakeup |= err; - } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { - err = vnet_nack(port, &msgbuf); - } - } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { - if (msgbuf.tag.stype_env == VNET_MCAST_INFO) - err = handle_mcast(port, &msgbuf); - else - err = vio_control_pkt_engine(vio, &msgbuf); - if (err) - break; - } else { - err = vnet_handle_unknown(port, &msgbuf); - } - if (err == -ECONNRESET) - break; - } - if (unlikely(tx_wakeup && err != -ECONNRESET)) - maybe_tx_wakeup(port); - return npkts; -} + struct vnet *vp = netdev_priv(dev); -static int vnet_poll(struct napi_struct *napi, int budget) -{ - struct vnet_port *port = container_of(napi, struct vnet_port, napi); - struct vio_driver_state *vio = &port->vio; - int processed = vnet_event_napi(port, budget); - - if (processed < budget) { - napi_complete(napi); - port->rx_event &= ~LDC_EVENT_DATA_READY; - vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); - } - return processed; + return vp->msg_enable; } -static void vnet_event(void *arg, int event) +static void vnet_set_msglevel(struct net_device *dev, u32 value) { - struct vnet_port *port = arg; - struct vio_driver_state *vio = &port->vio; - - port->rx_event |= event; - vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); - napi_schedule(&port->napi); + struct vnet *vp = netdev_priv(dev); + vp->msg_enable = value; } -static int __vnet_tx_trigger(struct vnet_port *port, u32 start) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct vio_dring_data hdr = { - .tag = { - .type = VIO_TYPE_DATA, - .stype = VIO_SUBTYPE_INFO, - .stype_env = VIO_DRING_DATA, - .sid = vio_send_sid(&port->vio), - }, - .dring_ident = dr->ident, - .start_idx = start, - .end_idx = (u32) -1, - }; - int err, delay; - int retries = 0; - - if (port->stop_rx) { - err = vnet_send_ack(port, - 
&port->vio.drings[VIO_DRIVER_RX_RING], - port->stop_rx_idx, -1, - VIO_DRING_STOPPED); - if (err <= 0) - return err; - } - - hdr.seq = dr->snd_nxt; - delay = 1; - do { - err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); - if (err > 0) { - dr->snd_nxt++; - break; - } - udelay(delay); - if ((delay <<= 1) > 128) - delay = 128; - if (retries++ > VNET_MAX_RETRIES) - break; - } while (err == -EAGAIN); +static const struct ethtool_ops vnet_ethtool_ops = { + .get_drvinfo = vnet_get_drvinfo, + .get_msglevel = vnet_get_msglevel, + .set_msglevel = vnet_set_msglevel, + .get_link = ethtool_op_get_link, +}; - return err; -} +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); -struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) +static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) { unsigned int hash = vnet_hashfn(skb->data); struct hlist_head *hp = &vp->port_hash[hash]; struct vnet_port *port; hlist_for_each_entry_rcu(port, hp, hash) { - if (!port_is_up(port)) + if (!sunvnet_port_is_up_common(port)) continue; if (ether_addr_equal(port->raddr, skb->data)) return port; @@ -927,838 +102,64 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) list_for_each_entry_rcu(port, &vp->port_list, list) { if (!port->switch_port) continue; - if (!port_is_up(port)) + if (!sunvnet_port_is_up_common(port)) continue; return port; } return NULL; } -static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, - unsigned *pending) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct sk_buff *skb = NULL; - int i, txi; - - *pending = 0; - - txi = dr->prod; - for (i = 0; i < VNET_TX_RING_SIZE; ++i) { - struct vio_net_desc *d; - - --txi; - if (txi < 0) - txi = VNET_TX_RING_SIZE-1; - - d = vio_dring_entry(dr, txi); - - if (d->hdr.state == VIO_DESC_READY) { - (*pending)++; - continue; - } - if (port->tx_bufs[txi].skb) { - if (d->hdr.state != VIO_DESC_DONE) - pr_notice("invalid ring buffer state %d\n", - d->hdr.state); - BUG_ON(port->tx_bufs[txi].skb->next); - - port->tx_bufs[txi].skb->next = skb; - skb = port->tx_bufs[txi].skb; - port->tx_bufs[txi].skb = NULL; - - ldc_unmap(port->vio.lp, - port->tx_bufs[txi].cookies, - port->tx_bufs[txi].ncookies); - } else if (d->hdr.state == VIO_DESC_FREE) - break; - d->hdr.state = VIO_DESC_FREE; - } - return skb; -} - -static inline void vnet_free_skbs(struct sk_buff *skb) -{ - struct sk_buff *next; - - while (skb) { - next = skb->next; - skb->next = NULL; - dev_kfree_skb(skb); - skb = next; - } -} - -static void vnet_clean_timer_expire(unsigned long port0) -{ - struct vnet_port *port = (struct vnet_port *)port0; - struct sk_buff *freeskbs; - unsigned pending; - - netif_tx_lock(port->vp->dev); - freeskbs = vnet_clean_tx_ring(port, &pending); - netif_tx_unlock(port->vp->dev); - - vnet_free_skbs(freeskbs); - - if (pending) - (void)mod_timer(&port->clean_timer, - jiffies + VNET_CLEAN_TIMEOUT); - else - del_timer(&port->clean_timer); -} - -static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, - struct ldc_trans_cookie *cookies, int ncookies, - unsigned int map_perm) +/* func arg to vnet_start_xmit_common() to get the proper tx port */ +static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, + struct net_device *dev) { - int i, nc, err, blen; - - /* header */ - blen = skb_headlen(skb); - if (blen < ETH_ZLEN) - blen = ETH_ZLEN; - blen += VNET_PACKET_SKIP; - blen += 8 - (blen & 7); - - err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, - 
ncookies, map_perm); - if (err < 0) - return err; - nc = err; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - u8 *vaddr; - - if (nc < ncookies) { - vaddr = kmap_atomic(skb_frag_page(f)); - blen = skb_frag_size(f); - blen += 8 - (blen & 7); - err = ldc_map_single(lp, vaddr + f->page_offset, - blen, cookies + nc, ncookies - nc, - map_perm); - kunmap_atomic(vaddr); - } else { - err = -EMSGSIZE; - } + struct vnet *vp = netdev_priv(dev); - if (err < 0) { - ldc_unmap(lp, cookies, nc); - return err; - } - nc += err; - } - return nc; + return __tx_port_find(vp, skb); } -static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) -{ - struct sk_buff *nskb; - int i, len, pad, docopy; - - len = skb->len; - pad = 0; - if (len < ETH_ZLEN) { - pad += ETH_ZLEN - skb->len; - len += pad; - } - len += VNET_PACKET_SKIP; - pad += 8 - (len & 7); - - /* make sure we have enough cookies and alignment in every frag */ - docopy = skb_shinfo(skb)->nr_frags >= ncookies; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - - docopy |= f->page_offset & 7; - } - if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || - skb_tailroom(skb) < pad || - skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { - int start = 0, offset; - __wsum csum; - - len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; - nskb = alloc_and_align_skb(skb->dev, len); - if (nskb == NULL) { - dev_kfree_skb(skb); - return NULL; - } - skb_reserve(nskb, VNET_PACKET_SKIP); - - nskb->protocol = skb->protocol; - offset = skb_mac_header(skb) - skb->data; - skb_set_mac_header(nskb, offset); - offset = skb_network_header(skb) - skb->data; - skb_set_network_header(nskb, offset); - offset = skb_transport_header(skb) - skb->data; - skb_set_transport_header(nskb, offset); - - offset = 0; - nskb->csum_offset = skb->csum_offset; - nskb->ip_summed = skb->ip_summed; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - start = skb_checksum_start_offset(skb); - if (start) { - struct iphdr *iph = ip_hdr(nskb); - int offset = start + nskb->csum_offset; - - if (skb_copy_bits(skb, 0, nskb->data, start)) { - dev_kfree_skb(nskb); - dev_kfree_skb(skb); - return NULL; - } - *(__sum16 *)(skb->data + offset) = 0; - csum = skb_copy_and_csum_bits(skb, start, - nskb->data + start, - skb->len - start, 0); - if (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) { - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - start, - iph->protocol, csum); - } - *(__sum16 *)(nskb->data + offset) = csum; - - nskb->ip_summed = CHECKSUM_NONE; - } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { - dev_kfree_skb(nskb); - dev_kfree_skb(skb); - return NULL; - } - (void)skb_put(nskb, skb->len); - if (skb_is_gso(skb)) { - skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; - skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; - } - nskb->queue_mapping = skb->queue_mapping; - dev_kfree_skb(skb); - skb = nskb; - } - return skb; -} - -static u16 -vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); - if (port == NULL) + if (!port) return 0; - return port->q_index; -} - -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev); - -static int vnet_handle_offloads(struct 
vnet_port *port, struct sk_buff *skb) -{ - struct net_device *dev = port->vp->dev; - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct sk_buff *segs; - int maclen, datalen; - int status; - int gso_size, gso_type, gso_segs; - int hlen = skb_transport_header(skb) - skb_mac_header(skb); - int proto = IPPROTO_IP; - - if (skb->protocol == htons(ETH_P_IP)) - proto = ip_hdr(skb)->protocol; - else if (skb->protocol == htons(ETH_P_IPV6)) - proto = ipv6_hdr(skb)->nexthdr; - - if (proto == IPPROTO_TCP) - hlen += tcp_hdr(skb)->doff * 4; - else if (proto == IPPROTO_UDP) - hlen += sizeof(struct udphdr); - else { - pr_err("vnet_handle_offloads GSO with unknown transport " - "protocol %d tproto %d\n", skb->protocol, proto); - hlen = 128; /* XXX */ - } - datalen = port->tsolen - hlen; - - gso_size = skb_shinfo(skb)->gso_size; - gso_type = skb_shinfo(skb)->gso_type; - gso_segs = skb_shinfo(skb)->gso_segs; - - if (port->tso && gso_size < datalen) - gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); - - if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { - struct netdev_queue *txq; - - txq = netdev_get_tx_queue(dev, port->q_index); - netif_tx_stop_queue(txq); - if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) - return NETDEV_TX_BUSY; - netif_tx_wake_queue(txq); - } - maclen = skb_network_header(skb) - skb_mac_header(skb); - skb_pull(skb, maclen); - - if (port->tso && gso_size < datalen) { - if (skb_unclone(skb, GFP_ATOMIC)) - goto out_dropped; - - /* segment to TSO size */ - skb_shinfo(skb)->gso_size = datalen; - skb_shinfo(skb)->gso_segs = gso_segs; - } - segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); - if (IS_ERR(segs)) - goto out_dropped; - - skb_push(skb, maclen); - skb_reset_mac_header(skb); - - status = 0; - while (segs) { - struct sk_buff *curr = segs; - - segs = segs->next; - curr->next = NULL; - if (port->tso && curr->len > dev->mtu) { - skb_shinfo(curr)->gso_size = gso_size; - skb_shinfo(curr)->gso_type = gso_type; - skb_shinfo(curr)->gso_segs = - DIV_ROUND_UP(curr->len - hlen, gso_size); - } else - skb_shinfo(curr)->gso_size = 0; - - skb_push(curr, maclen); - skb_reset_mac_header(curr); - memcpy(skb_mac_header(curr), skb_mac_header(skb), - maclen); - curr->csum_start = skb_transport_header(curr) - curr->head; - if (ip_hdr(curr)->protocol == IPPROTO_TCP) - curr->csum_offset = offsetof(struct tcphdr, check); - else if (ip_hdr(curr)->protocol == IPPROTO_UDP) - curr->csum_offset = offsetof(struct udphdr, check); - - if (!(status & NETDEV_TX_MASK)) - status = vnet_start_xmit(curr, dev); - if (status & NETDEV_TX_MASK) - dev_kfree_skb_any(curr); - } - - if (!(status & NETDEV_TX_MASK)) - dev_kfree_skb_any(skb); - return status; -out_dropped: - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + return port->q_index; } +/* Wrappers to common functions */ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = NULL; - struct vio_dring_state *dr; - struct vio_net_desc *d; - unsigned int len; - struct sk_buff *freeskbs = NULL; - int i, err, txi; - unsigned pending = 0; - struct netdev_queue *txq; - - rcu_read_lock(); - port = __tx_port_find(vp, skb); - if (unlikely(!port)) { - rcu_read_unlock(); - goto out_dropped; - } - - if (skb_is_gso(skb) && skb->len > port->tsolen) { - err = vnet_handle_offloads(port, skb); - rcu_read_unlock(); - return err; - } - - if (!skb_is_gso(skb) && skb->len > port->rmtu) { - unsigned long localmtu = port->rmtu - ETH_HLEN; - - if 
(vio_version_after_eq(&port->vio, 1, 3)) - localmtu -= VLAN_HLEN; - - if (skb->protocol == htons(ETH_P_IP)) { - struct flowi4 fl4; - struct rtable *rt = NULL; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = dev->ifindex; - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); - fl4.daddr = ip_hdr(skb)->daddr; - fl4.saddr = ip_hdr(skb)->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - rcu_read_unlock(); - if (!IS_ERR(rt)) { - skb_dst_set(skb, &rt->dst); - icmp_send(skb, ICMP_DEST_UNREACH, - ICMP_FRAG_NEEDED, - htonl(localmtu)); - } - } -#if IS_ENABLED(CONFIG_IPV6) - else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); -#endif - goto out_dropped; - } - - skb = vnet_skb_shape(skb, 2); - - if (unlikely(!skb)) - goto out_dropped; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - vnet_fullcsum(skb); - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - i = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, i); - if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); - dev->stats.tx_errors++; - } - rcu_read_unlock(); - return NETDEV_TX_BUSY; - } - - d = vio_dring_cur(dr); - - txi = dr->prod; - - freeskbs = vnet_clean_tx_ring(port, &pending); - - BUG_ON(port->tx_bufs[txi].skb); - - len = skb->len; - if (len < ETH_ZLEN) - len = ETH_ZLEN; - - err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, - (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); - if (err < 0) { - netdev_info(dev, "tx buffer map error %d\n", err); - goto out_dropped; - } - - port->tx_bufs[txi].skb = skb; - skb = NULL; - port->tx_bufs[txi].ncookies = err; - - /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), - * thus it is safe to not set VIO_ACK_ENABLE for each transmission: - * the protocol itself does not require it as long as the peer - * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. - * - * An ACK for every packet in the ring is expensive as the - * sending of LDC messages is slow and affects performance. - */ - d->hdr.ack = VIO_ACK_DISABLE; - d->size = len; - d->ncookies = port->tx_bufs[txi].ncookies; - for (i = 0; i < d->ncookies; i++) - d->cookies[i] = port->tx_bufs[txi].cookies[i]; - if (vio_version_after_eq(&port->vio, 1, 7)) { - struct vio_net_dext *dext = vio_net_ext(d); - - memset(dext, 0, sizeof(*dext)); - if (skb_is_gso(port->tx_bufs[txi].skb)) { - dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) - ->gso_size; - dext->flags |= VNET_PKT_IPV4_LSO; - } - if (vio_version_after_eq(&port->vio, 1, 8) && - !port->switch_port) { - dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; - dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; - } - } - - /* This has to be a non-SMP write barrier because we are writing - * to memory which is shared with the peer LDOM. - */ - dma_wmb(); - - d->hdr.state = VIO_DESC_READY; - - /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent - * to notify the consumer that some descriptors are READY. - * After that "start" trigger, no additional triggers are needed until - * a DRING_STOPPED is received from the consumer. The dr->cons field - * (set up by vnet_ack()) has the value of the next dring index - * that has not yet been ack-ed. We send a "start" trigger here - * if, and only if, start_cons is true (reset it afterward). Conversely, - * vnet_ack() should check if the dring corresponding to cons - * is marked READY, but start_cons was false. 
- * If so, vnet_ack() should send out the missed "start" trigger. - * - * Note that the dma_wmb() above makes sure the cookies et al. are - * not globally visible before the VIO_DESC_READY, and that the - * stores are ordered correctly by the compiler. The consumer will - * not proceed until the VIO_DESC_READY is visible assuring that - * the consumer does not observe anything related to descriptors - * out of order. The HV trap from the LDC start trigger is the - * producer to consumer announcement that work is available to the - * consumer - */ - if (!port->start_cons) - goto ldc_start_done; /* previous trigger suffices */ - - err = __vnet_tx_trigger(port, dr->cons); - if (unlikely(err < 0)) { - netdev_info(dev, "TX trigger error %d\n", err); - d->hdr.state = VIO_DESC_FREE; - skb = port->tx_bufs[txi].skb; - port->tx_bufs[txi].skb = NULL; - dev->stats.tx_carrier_errors++; - goto out_dropped; - } - -ldc_start_done: - port->start_cons = false; - - dev->stats.tx_packets++; - dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; - - dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); - if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - netif_tx_stop_queue(txq); - if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) - netif_tx_wake_queue(txq); - } - - (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); - rcu_read_unlock(); - - vnet_free_skbs(freeskbs); - - return NETDEV_TX_OK; - -out_dropped: - if (pending) - (void)mod_timer(&port->clean_timer, - jiffies + VNET_CLEAN_TIMEOUT); - else if (port) - del_timer(&port->clean_timer); - if (port) - rcu_read_unlock(); - if (skb) - dev_kfree_skb(skb); - vnet_free_skbs(freeskbs); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; -} - -static void vnet_tx_timeout(struct net_device *dev) -{ - /* XXX Implement me XXX */ -} - -static int vnet_open(struct net_device *dev) -{ - netif_carrier_on(dev); - netif_tx_start_all_queues(dev); - - return 0; -} - -static int vnet_close(struct net_device *dev) -{ - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - - return 0; -} - -static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) -{ - struct vnet_mcast_entry *m; - - for (m = vp->mcast_list; m; m = m->next) { - if (ether_addr_equal(m->addr, addr)) - return m; - } - return NULL; -} - -static void __update_mc_list(struct vnet *vp, struct net_device *dev) -{ - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - struct vnet_mcast_entry *m; - - m = __vnet_mc_find(vp, ha->addr); - if (m) { - m->hit = 1; - continue; - } - - if (!m) { - m = kzalloc(sizeof(*m), GFP_ATOMIC); - if (!m) - continue; - memcpy(m->addr, ha->addr, ETH_ALEN); - m->hit = 1; - - m->next = vp->mcast_list; - vp->mcast_list = m; - } - } -} - -static void __send_mc_list(struct vnet *vp, struct vnet_port *port) -{ - struct vio_net_mcast_info info; - struct vnet_mcast_entry *m, **pp; - int n_addrs; - - memset(&info, 0, sizeof(info)); - - info.tag.type = VIO_TYPE_CTRL; - info.tag.stype = VIO_SUBTYPE_INFO; - info.tag.stype_env = VNET_MCAST_INFO; - info.tag.sid = vio_send_sid(&port->vio); - info.set = 1; - - n_addrs = 0; - for (m = vp->mcast_list; m; m = m->next) { - if (m->sent) - continue; - m->sent = 1; - memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], - m->addr, ETH_ALEN); - if (++n_addrs == VNET_NUM_MCAST) { - info.count = n_addrs; - - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); - n_addrs = 0; - } - } - if (n_addrs) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); - } - - info.set = 0; - - n_addrs = 0; - pp = 
&vp->mcast_list; - while ((m = *pp) != NULL) { - if (m->hit) { - m->hit = 0; - pp = &m->next; - continue; - } - - memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], - m->addr, ETH_ALEN); - if (++n_addrs == VNET_NUM_MCAST) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); - n_addrs = 0; - } - - *pp = m->next; - kfree(m); - } - if (n_addrs) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); - } + return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } static void vnet_set_rx_mode(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); - struct vnet_port *port; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &vp->port_list, list) { - - if (port->switch_port) { - __update_mc_list(vp, dev); - __send_mc_list(vp, port); - break; - } - } - rcu_read_unlock(); -} - -static int vnet_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < 68 || new_mtu > 65535) - return -EINVAL; - - dev->mtu = new_mtu; - return 0; -} -static int vnet_set_mac_addr(struct net_device *dev, void *p) -{ - return -EINVAL; -} - -static void vnet_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); -} - -static u32 vnet_get_msglevel(struct net_device *dev) -{ - struct vnet *vp = netdev_priv(dev); - return vp->msg_enable; -} - -static void vnet_set_msglevel(struct net_device *dev, u32 value) -{ - struct vnet *vp = netdev_priv(dev); - vp->msg_enable = value; -} - -static const struct ethtool_ops vnet_ethtool_ops = { - .get_drvinfo = vnet_get_drvinfo, - .get_msglevel = vnet_get_msglevel, - .set_msglevel = vnet_set_msglevel, - .get_link = ethtool_op_get_link, -}; - -static void vnet_port_free_tx_bufs(struct vnet_port *port) -{ - struct vio_dring_state *dr; - int i; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - - if (dr->base == NULL) - return; - - for (i = 0; i < VNET_TX_RING_SIZE; i++) { - struct vio_net_desc *d; - void *skb = port->tx_bufs[i].skb; - - if (!skb) - continue; - - d = vio_dring_entry(dr, i); - - ldc_unmap(port->vio.lp, - port->tx_bufs[i].cookies, - port->tx_bufs[i].ncookies); - dev_kfree_skb(skb); - port->tx_bufs[i].skb = NULL; - d->hdr.state = VIO_DESC_FREE; - } - ldc_free_exp_dring(port->vio.lp, dr->base, - (dr->entry_size * dr->num_entries), - dr->cookies, dr->ncookies); - dr->base = NULL; - dr->entry_size = 0; - dr->num_entries = 0; - dr->pending = 0; - dr->ncookies = 0; -} - -static void vnet_port_reset(struct vnet_port *port) -{ - del_timer(&port->clean_timer); - vnet_port_free_tx_bufs(port); - port->rmtu = 0; - port->tso = true; - port->tsolen = 0; -} - -static int vnet_port_alloc_tx_ring(struct vnet_port *port) -{ - struct vio_dring_state *dr; - unsigned long len, elen; - int i, err, ncookies; - void *dring; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - - elen = sizeof(struct vio_net_desc) + - sizeof(struct ldc_trans_cookie) * 2; - if (vio_version_after_eq(&port->vio, 1, 7)) - elen += sizeof(struct vio_net_dext); - len = VNET_TX_RING_SIZE * elen; - - ncookies = VIO_MAX_RING_COOKIES; - dring = ldc_alloc_exp_dring(port->vio.lp, len, - dr->cookies, &ncookies, - (LDC_MAP_SHADOW | - LDC_MAP_DIRECT | - LDC_MAP_RW)); - if (IS_ERR(dring)) { - err = PTR_ERR(dring); - goto err_out; - } - - dr->base = dring; - dr->entry_size = elen; - dr->num_entries = VNET_TX_RING_SIZE; - dr->prod = dr->cons = 0; - port->start_cons = true; /* need an initial trigger */ - dr->pending = 
VNET_TX_RING_SIZE; - dr->ncookies = ncookies; - - for (i = 0; i < VNET_TX_RING_SIZE; ++i) { - struct vio_net_desc *d; - - d = vio_dring_entry(dr, i); - d->hdr.state = VIO_DESC_FREE; - } - return 0; - -err_out: - vnet_port_free_tx_bufs(port); - - return err; + return sunvnet_set_rx_mode_common(dev, vp); } #ifdef CONFIG_NET_POLL_CONTROLLER static void vnet_poll_controller(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); - struct vnet_port *port; - unsigned long flags; - spin_lock_irqsave(&vp->lock, flags); - if (!list_empty(&vp->port_list)) { - port = list_entry(vp->port_list.next, struct vnet_port, list); - napi_schedule(&port->napi); - } - spin_unlock_irqrestore(&vp->lock, flags); + return sunvnet_poll_controller_common(dev, vp); } #endif -static LIST_HEAD(vnet_list); -static DEFINE_MUTEX(vnet_list_mutex); static const struct net_device_ops vnet_ops = { - .ndo_open = vnet_open, - .ndo_stop = vnet_close, + .ndo_open = sunvnet_open_common, + .ndo_stop = sunvnet_close_common, .ndo_set_rx_mode = vnet_set_rx_mode, - .ndo_set_mac_address = vnet_set_mac_addr, + .ndo_set_mac_address = sunvnet_set_mac_addr_common, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = vnet_tx_timeout, - .ndo_change_mtu = vnet_change_mtu, + .ndo_tx_timeout = sunvnet_tx_timeout_common, + .ndo_change_mtu = sunvnet_change_mtu_common, .ndo_start_xmit = vnet_start_xmit, .ndo_select_queue = vnet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1888,15 +289,15 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp, } static struct ldc_channel_config vnet_ldc_cfg = { - .event = vnet_event, + .event = sunvnet_event_common, .mtu = 64, .mode = LDC_MODE_UNRELIABLE, }; static struct vio_driver_ops vnet_vio_ops = { - .send_attr = vnet_send_attr, - .handle_attr = vnet_handle_attr, - .handshake_complete = vnet_handshake_complete, + .send_attr = sunvnet_send_attr_common, + .handle_attr = sunvnet_handle_attr_common, + .handshake_complete = sunvnet_handshake_complete_common, }; static void print_version(void) @@ -1906,25 +307,6 @@ static void print_version(void) const char *remote_macaddr_prop = "remote-mac-address"; -static void -vnet_port_add_txq(struct vnet_port *port) -{ - struct vnet *vp = port->vp; - int n; - - n = vp->nports++; - n = n & (VNET_MAX_TXQS - 1); - port->q_index = n; - netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); -} - -static void -vnet_port_rm_txq(struct vnet_port *port) -{ - port->vp->nports--; - netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); -} - static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -1972,13 +354,14 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_port; - netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); + netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common, + NAPI_POLL_WEIGHT); INIT_HLIST_NODE(&port->hash); INIT_LIST_HEAD(&port->list); switch_port = 0; - if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) + if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL)) switch_port = 1; port->switch_port = switch_port; port->tso = true; @@ -1991,7 +374,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) list_add_tail_rcu(&port->list, &vp->port_list); hlist_add_head_rcu(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); - vnet_port_add_txq(port); + sunvnet_port_add_txq_common(port); spin_unlock_irqrestore(&vp->lock, flags); 
dev_set_drvdata(&vdev->dev, port); @@ -1999,7 +382,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) pr_info("%s: PORT ( remote-mac %pM%s )\n", vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); - setup_timer(&port->clean_timer, vnet_clean_timer_expire, + setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, (unsigned long)port); napi_enable(&port->napi); @@ -2022,7 +405,6 @@ static int vnet_port_remove(struct vio_dev *vdev) struct vnet_port *port = dev_get_drvdata(&vdev->dev); if (port) { - del_timer_sync(&port->vio.timer); napi_disable(&port->napi); @@ -2032,15 +414,14 @@ static int vnet_port_remove(struct vio_dev *vdev) synchronize_rcu(); del_timer_sync(&port->clean_timer); - vnet_port_rm_txq(port); + sunvnet_port_rm_txq_common(port); netif_napi_del(&port->napi); - vnet_port_free_tx_bufs(port); + sunvnet_port_free_tx_bufs_common(port); vio_ldc_free(&port->vio); dev_set_drvdata(&vdev->dev, NULL); kfree(port); - } return 0; } diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h deleted file mode 100644 index 01ca78191683..000000000000 --- a/drivers/net/ethernet/sun/sunvnet.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef _SUNVNET_H -#define _SUNVNET_H - -#include <linux/interrupt.h> - -#define DESC_NCOOKIES(entry_size) \ - ((entry_size) - sizeof(struct vio_net_desc)) - -/* length of time before we decide the hardware is borked, - * and dev->tx_timeout() should be called to fix the problem - */ -#define VNET_TX_TIMEOUT (5 * HZ) - -/* length of time (or less) we expect pending descriptors to be marked - * as VIO_DESC_DONE and skbs ready to be freed - */ -#define VNET_CLEAN_TIMEOUT ((HZ/100)+1) - -#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) -#define VNET_TX_RING_SIZE 512 -#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) - -#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ -#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ - -/* VNET packets are sent in buffers with the first 6 bytes skipped - * so that after the ethernet header the IPv4/IPv6 headers are aligned - * properly. - */ -#define VNET_PACKET_SKIP 6 - -#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1) - -struct vnet_tx_entry { - struct sk_buff *skb; - unsigned int ncookies; - struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; -}; - -struct vnet; -struct vnet_port { - struct vio_driver_state vio; - - struct hlist_node hash; - u8 raddr[ETH_ALEN]; - unsigned switch_port:1; - unsigned tso:1; - unsigned __pad:14; - - struct vnet *vp; - - struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; - - struct list_head list; - - u32 stop_rx_idx; - bool stop_rx; - bool start_cons; - - struct timer_list clean_timer; - - u64 rmtu; - u16 tsolen; - - struct napi_struct napi; - u32 napi_stop_idx; - bool napi_resume; - int rx_event; - u16 q_index; -}; - -static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) -{ - return container_of(vio, struct vnet_port, vio); -} - -#define VNET_PORT_HASH_SIZE 16 -#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) - -static inline unsigned int vnet_hashfn(u8 *mac) -{ - unsigned int val = mac[4] ^ mac[5]; - - return val & (VNET_PORT_HASH_MASK); -} - -struct vnet_mcast_entry { - u8 addr[ETH_ALEN]; - u8 sent; - u8 hit; - struct vnet_mcast_entry *next; -}; - -struct vnet { - /* Protects port_list and port_hash. 
*/ - spinlock_t lock; - - struct net_device *dev; - - u32 msg_enable; - - struct list_head port_list; - - struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; - - struct vnet_mcast_entry *mcast_list; - - struct list_head list; - u64 local_mac; - - int nports; -}; - -#endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c new file mode 100644 index 000000000000..904a5a12a85d --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -0,0 +1,1732 @@ +/* sunvnet.c: Sun LDOM Virtual Network Driver. + * + * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> + * Copyright (C) 2016 Oracle. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/mutex.h> +#include <linux/highmem.h> +#include <linux/if_vlan.h> +#define CREATE_TRACE_POINTS +#include <trace/events/sunvnet.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <linux/icmpv6.h> +#endif + +#include <net/ip.h> +#include <net/icmp.h> +#include <net/route.h> + +#include <asm/vio.h> +#include <asm/ldc.h> + +#include "sunvnet_common.h" + +/* Heuristic for the number of times to exponentially backoff and + * retry sending an LDC trigger when EAGAIN is encountered + */ +#define VNET_MAX_RETRIES 10 + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start); +static void vnet_port_reset(struct vnet_port *port); + +static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) +{ + return vio_dring_avail(dr, VNET_TX_RING_SIZE); +} + +static int vnet_handle_unknown(struct vnet_port *port, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + pr_err("Resetting connection\n"); + + ldc_disconnect(port->vio.lp); + + return -ECONNRESET; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port); + +int sunvnet_send_attr_common(struct vio_driver_state *vio) +{ + struct vnet_port *port = to_vnet_port(vio); + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); + struct vio_net_attr_info pkt; + int framelen = ETH_FRAME_LEN; + int i, err; + + err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); + if (err) + return err; + + memset(&pkt, 0, sizeof(pkt)); + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.stype = VIO_SUBTYPE_INFO; + pkt.tag.stype_env = VIO_ATTR_INFO; + pkt.tag.sid = vio_send_sid(vio); + if (vio_version_before(vio, 1, 2)) + pkt.xfer_mode = VIO_DRING_MODE; + else + pkt.xfer_mode = VIO_NEW_DRING_MODE; + pkt.addr_type = VNET_ADDR_ETHERMAC; + pkt.ack_freq = 0; + for (i = 0; i < 6; i++) + pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + if (vio_version_after(vio, 1, 3)) { + if (port->rmtu) { + port->rmtu = min(VNET_MAXPACKET, port->rmtu); + pkt.mtu = port->rmtu; + } else { + port->rmtu = VNET_MAXPACKET; + pkt.mtu = port->rmtu; + } + if (vio_version_after_eq(vio, 1, 6)) + pkt.options = VIO_TX_DRING; + } else if (vio_version_before(vio, 1, 3)) { + pkt.mtu = framelen; + } else { /* v1.3 */ + pkt.mtu = framelen + VLAN_HLEN; + } + + pkt.cflags = 0; + if (vio_version_after_eq(vio, 1, 7) && port->tso) { + pkt.cflags |= VNET_LSO_IPV4_CAPAB; + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + pkt.ipv4_lso_maxlen = port->tsolen; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; + + viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 
+ "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + "cflags[0x%04x] lso_max[%u]\n", + pkt.xfer_mode, pkt.addr_type, + (unsigned long long)pkt.addr, + pkt.ack_freq, pkt.plnk_updt, pkt.options, + (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); + + return vio_ldc_send(vio, &pkt, sizeof(pkt)); +} +EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); + +static int handle_attr_info(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + struct vnet_port *port = to_vnet_port(vio); + u64 localmtu; + u8 xfer_mode; + + viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.sid = vio_send_sid(vio); + + xfer_mode = pkt->xfer_mode; + /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ + if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) + xfer_mode = VIO_NEW_DRING_MODE; + + /* MTU negotiation: + * < v1.3 - ETH_FRAME_LEN exactly + * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change + * pkt->mtu for ACK + * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly + */ + if (vio_version_before(vio, 1, 3)) { + localmtu = ETH_FRAME_LEN; + } else if (vio_version_after(vio, 1, 3)) { + localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; + localmtu = min(pkt->mtu, localmtu); + pkt->mtu = localmtu; + } else { /* v1.3 */ + localmtu = ETH_FRAME_LEN + VLAN_HLEN; + } + port->rmtu = localmtu; + + /* LSO negotiation */ + if (vio_version_after_eq(vio, 1, 7)) + port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); + else + port->tso = false; + if (port->tso) { + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); + if (port->tsolen < VNET_MINTSO) { + port->tso = false; + port->tsolen = 0; + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + } + pkt->ipv4_lso_maxlen = port->tsolen; + } else { + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + pkt->ipv4_lso_maxlen = 0; + } + + /* for version >= 1.6, ACK packet mode we support */ + if (vio_version_after_eq(vio, 1, 6)) { + pkt->xfer_mode = VIO_NEW_DRING_MODE; + pkt->options = VIO_TX_DRING; + } + + if (!(xfer_mode | VIO_NEW_DRING_MODE) || + pkt->addr_type != VNET_ADDR_ETHERMAC || + pkt->mtu != localmtu) { + viodbg(HS, "SEND NET ATTR NACK\n"); + + pkt->tag.stype = VIO_SUBTYPE_NACK; + + (void)vio_ldc_send(vio, pkt, sizeof(*pkt)); + + return -ECONNRESET; + } + + viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " + "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " + "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + + return vio_ldc_send(vio, pkt, sizeof(*pkt)); +} + +static int handle_attr_ack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR ACK\n"); + + return 0; +} + +static int handle_attr_nack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR NACK\n"); + + return -ECONNRESET; +} + +int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) +{ + struct vio_net_attr_info *pkt = arg; + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: 
+ return handle_attr_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return handle_attr_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return handle_attr_nack(vio, pkt); + + default: + return -ECONNRESET; + } +} +EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); + +void sunvnet_handshake_complete_common(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + dr->rcv_nxt = 1; + dr->snd_nxt = 1; + + dr = &vio->drings[VIO_DRIVER_TX_RING]; + dr->rcv_nxt = 1; + dr->snd_nxt = 1; +} +EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); + +/* The hypervisor interface that implements copying to/from imported + * memory from another domain requires that copies are done to 8-byte + * aligned buffers, and that the lengths of such copies are also 8-byte + * multiples. + * + * So we align skb->data to an 8-byte multiple and pad-out the data + * area so we can round the copy length up to the next multiple of + * 8 for the copy. + * + * The transmitter puts the actual start of the packet 6 bytes into + * the buffer it sends over, so that the IP headers after the ethernet + * header are aligned properly. These 6 bytes are not in the descriptor + * length, they are simply implied. This offset is represented using + * the VNET_PACKET_SKIP macro. + */ +static struct sk_buff *alloc_and_align_skb(struct net_device *dev, + unsigned int len) +{ + struct sk_buff *skb; + unsigned long addr, off; + + skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); + if (unlikely(!skb)) + return NULL; + + addr = (unsigned long)skb->data; + off = ((addr + 7UL) & ~7UL) - addr; + if (off) + skb_reserve(skb, off); + + return skb; +} + +static inline void vnet_fullcsum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return; + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (iph->protocol == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} + +static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) +{ + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); + unsigned int len = desc->size; + unsigned int copy_len; + struct sk_buff *skb; + int maxlen; + int err; + + err = -EMSGSIZE; + if (port->tso && port->tsolen > port->rmtu) + maxlen = port->tsolen; + else + maxlen = port->rmtu; + if (unlikely(len < ETH_ZLEN || len > maxlen)) { + dev->stats.rx_length_errors++; + goto out_dropped; + } + + skb = alloc_and_align_skb(dev, len); + err = -ENOMEM; + if (unlikely(!skb)) { + dev->stats.rx_missed_errors++; + goto out_dropped; + } + + copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; + skb_put(skb, copy_len); + err = ldc_copy(port->vio.lp, LDC_COPY_IN, + skb->data, copy_len, 0, + desc->cookies, desc->ncookies); + if (unlikely(err < 0)) { + dev->stats.rx_frame_errors++; + goto out_free_skb; + } + + skb_pull(skb, VNET_PACKET_SKIP); + skb_trim(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + if 
(vio_version_after_eq(&port->vio, 1, 8)) { + struct vio_net_dext *dext = vio_net_ext(desc); + + skb_reset_network_header(skb); + + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { + if (skb->protocol == ETH_P_IP) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + ip_send_check(iph); + } + } + if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && + skb->ip_summed == CHECKSUM_NONE) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; + + skb_reset_transport_header(skb); + skb_set_transport_header(skb, ihl); + vnet_fullcsum(skb); + } + } + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_level = 0; + if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) + skb->csum_level = 1; + } + } + + skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + napi_gro_receive(&port->napi, skb); + return 0; + +out_free_skb: + kfree_skb(skb); + +out_dropped: + dev->stats.rx_dropped++; + return err; +} + +static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, u8 vio_dring_state) +{ + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_ACK, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = end, + .state = vio_dring_state, + }; + int err, delay; + int retries = 0; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) { + pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", + port->raddr[0], port->raddr[1], + port->raddr[2], port->raddr[3], + port->raddr[4], port->raddr[5]); + break; + } + } while (err == -EAGAIN); + + if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { + port->stop_rx_idx = end; + port->stop_rx = true; + } else { + port->stop_rx_idx = 0; + port->stop_rx = false; + } + + return err; +} + +static struct vio_net_desc *get_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index) +{ + struct vio_net_desc *desc = port->vio.desc_buf; + int err; + + err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return ERR_PTR(err); + + return desc; +} + +static int put_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + struct vio_net_desc *desc, + u32 index) +{ + int err; + + err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return err; + + return 0; +} + +static int vnet_walk_rx_one(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index, int *needs_ack) +{ + struct vio_net_desc *desc = get_rx_desc(port, dr, index); + struct vio_driver_state *vio = &port->vio; + int err; + + BUG_ON(!desc); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + if (desc->hdr.state != VIO_DESC_READY) + return 1; + + dma_rmb(); + + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", + desc->hdr.state, desc->hdr.ack, + desc->size, desc->ncookies, + desc->cookies[0].cookie_addr, + desc->cookies[0].cookie_size); + + err = vnet_rx_one(port, desc); + if (err == -ECONNRESET) + return err; + trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, + index, desc->hdr.ack); + desc->hdr.state = VIO_DESC_DONE; + err 
= put_rx_desc(port, dr, desc, index); + if (err < 0) + return err; + *needs_ack = desc->hdr.ack; + return 0; +} + +static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, int *npkts, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int ack_start = -1, ack_end = -1; + bool send_ack = true; + + end = (end == (u32)-1) ? vio_dring_prev(dr, start) + : vio_dring_next(dr, end); + + viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); + + while (start != end) { + int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); + + if (err == -ECONNRESET) + return err; + if (err != 0) + break; + (*npkts)++; + if (ack_start == -1) + ack_start = start; + ack_end = start; + start = vio_dring_next(dr, start); + if (ack && start != end) { + err = vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_ACTIVE); + if (err == -ECONNRESET) + return err; + ack_start = -1; + } + if ((*npkts) >= budget) { + send_ack = false; + break; + } + } + if (unlikely(ack_start == -1)) { + ack_end = vio_dring_prev(dr, start); + ack_start = ack_end; + } + if (send_ack) { + port->napi_resume = false; + trace_vnet_tx_send_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); + return vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_STOPPED); + } else { + trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); + port->napi_resume = true; + port->napi_stop_idx = ack_end; + return 1; + } +} + +static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, + int budget) +{ + struct vio_dring_data *pkt = msgbuf; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; + struct vio_driver_state *vio = &port->vio; + + viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", + pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + if (unlikely(pkt->seq != dr->rcv_nxt)) { + pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", + pkt->seq, dr->rcv_nxt); + return 0; + } + + if (!port->napi_resume) + dr->rcv_nxt++; + + /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ + + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, + npkts, budget); +} + +static int idx_is_pending(struct vio_dring_state *dr, u32 end) +{ + u32 idx = dr->cons; + int found = 0; + + while (idx != dr->prod) { + if (idx == end) { + found = 1; + break; + } + idx = vio_dring_next(dr, idx); + } + return found; +} + +static int vnet_ack(struct vnet_port *port, void *msgbuf) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data *pkt = msgbuf; + struct net_device *dev; + u32 end; + struct vio_net_desc *desc; + struct netdev_queue *txq; + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + + end = pkt->end_idx; + dev = VNET_PORT_TO_NET_DEVICE(port); + netif_tx_lock(dev); + if (unlikely(!idx_is_pending(dr, end))) { + netif_tx_unlock(dev); + return 0; + } + + /* sync for race conditions with vnet_start_xmit() and tell xmit it + * is time to send a trigger. + */ + trace_vnet_rx_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, end); + dr->cons = vio_dring_next(dr, end); + desc = vio_dring_entry(dr, dr->cons); + if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { + /* vnet_start_xmit() just populated this dring but missed + * sending the "start" LDC message to the consumer. + * Send a "start" trigger on its behalf. 
+ */ + if (__vnet_tx_trigger(port, dr->cons) > 0) + port->start_cons = false; + else + port->start_cons = true; + } else { + port->start_cons = true; + } + netif_tx_unlock(dev); + + txq = netdev_get_tx_queue(dev, port->q_index); + if (unlikely(netif_tx_queue_stopped(txq) && + vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) + return 1; + + return 0; +} + +static int vnet_nack(struct vnet_port *port, void *msgbuf) +{ + /* XXX just reset or similar XXX */ + return 0; +} + +static int handle_mcast(struct vnet_port *port, void *msgbuf) +{ + struct vio_net_mcast_info *pkt = msgbuf; + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); + + if (pkt->tag.stype != VIO_SUBTYPE_ACK) + pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", + dev->name, + pkt->tag.type, + pkt->tag.stype, + pkt->tag.stype_env, + pkt->tag.sid); + + return 0; +} + +/* Got back a STOPPED LDC message on port. If the queue is stopped, + * wake it up so that we'll send out another START message at the + * next TX. + */ +static void maybe_tx_wakeup(struct vnet_port *port) +{ + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index); + __netif_tx_lock(txq, smp_processor_id()); + if (likely(netif_tx_queue_stopped(txq))) { + struct vio_dring_state *dr; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + netif_tx_wake_queue(txq); + } + __netif_tx_unlock(txq); +} + +bool sunvnet_port_is_up_common(struct vnet_port *vnet) +{ + struct vio_driver_state *vio = &vnet->vio; + + return !!(vio->hs_state & VIO_HS_COMPLETE); +} +EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); + +static int vnet_event_napi(struct vnet_port *port, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int tx_wakeup, err; + int npkts = 0; + int event = (port->rx_event & LDC_EVENT_RESET); + +ldc_ctrl: + if (unlikely(event == LDC_EVENT_RESET || + event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + + if (event == LDC_EVENT_RESET) { + vnet_port_reset(port); + vio_port_up(vio); + } + port->rx_event = 0; + return 0; + } + /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ + event = (port->rx_event & LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP); + if (event == LDC_EVENT_UP) + goto ldc_ctrl; + event = port->rx_event; + if (!(event & LDC_EVENT_DATA_READY)) + return 0; + + /* we dont expect any other bits than RESET, UP, DATA_READY */ + BUG_ON(event != LDC_EVENT_DATA_READY); + + err = 0; + tx_wakeup = 0; + while (1) { + union { + struct vio_msg_tag tag; + u64 raw[8]; + } msgbuf; + + if (port->napi_resume) { + struct vio_dring_data *pkt = + (struct vio_dring_data *)&msgbuf; + struct vio_dring_state *dr = + &port->vio.drings[VIO_DRIVER_RX_RING]; + + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = VIO_SUBTYPE_INFO; + pkt->tag.stype_env = VIO_DRING_DATA; + pkt->seq = dr->rcv_nxt; + pkt->start_idx = vio_dring_next(dr, + port->napi_stop_idx); + pkt->end_idx = -1; + goto napi_resume; + } + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; +napi_resume: + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { + if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { + if (!sunvnet_port_is_up_common(port)) { + /* failures like handshake_failure() + * may have cleaned up dring, but + * NAPI polling may bring us here. + */ + err = -ECONNRESET; + break; + } + err = vnet_rx(port, &msgbuf, &npkts, budget); + if (npkts >= budget) + break; + if (npkts == 0) + break; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { + err = vnet_ack(port, &msgbuf); + if (err > 0) + tx_wakeup |= err; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { + err = vnet_nack(port, &msgbuf); + } + } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { + if (msgbuf.tag.stype_env == VNET_MCAST_INFO) + err = handle_mcast(port, &msgbuf); + else + err = vio_control_pkt_engine(vio, &msgbuf); + if (err) + break; + } else { + err = vnet_handle_unknown(port, &msgbuf); + } + if (err == -ECONNRESET) + break; + } + if (unlikely(tx_wakeup && err != -ECONNRESET)) + maybe_tx_wakeup(port); + return npkts; +} + +int sunvnet_poll_common(struct napi_struct *napi, int budget) +{ + struct vnet_port *port = container_of(napi, struct vnet_port, napi); + struct vio_driver_state *vio = &port->vio; + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { + napi_complete(napi); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } + return processed; +} +EXPORT_SYMBOL_GPL(sunvnet_poll_common); + +void sunvnet_event_common(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + + port->rx_event |= event; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); + napi_schedule(&port->napi); +} +EXPORT_SYMBOL_GPL(sunvnet_event_common); + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_INFO, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = (u32)-1, + }; + int err, delay; + int retries = 0; + + if (port->stop_rx) { + trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + port->stop_rx_idx, -1); + err = 
vnet_send_ack(port, + &port->vio.drings[VIO_DRIVER_RX_RING], + port->stop_rx_idx, -1, + VIO_DRING_STOPPED); + if (err <= 0) + return err; + } + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) + break; + } while (err == -EAGAIN); + trace_vnet_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, start, err); + + return err; +} + +static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, + unsigned *pending) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *skb = NULL; + int i, txi; + + *pending = 0; + + txi = dr->prod; + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + --txi; + if (txi < 0) + txi = VNET_TX_RING_SIZE - 1; + + d = vio_dring_entry(dr, txi); + + if (d->hdr.state == VIO_DESC_READY) { + (*pending)++; + continue; + } + if (port->tx_bufs[txi].skb) { + if (d->hdr.state != VIO_DESC_DONE) + pr_notice("invalid ring buffer state %d\n", + d->hdr.state); + BUG_ON(port->tx_bufs[txi].skb->next); + + port->tx_bufs[txi].skb->next = skb; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + + ldc_unmap(port->vio.lp, + port->tx_bufs[txi].cookies, + port->tx_bufs[txi].ncookies); + } else if (d->hdr.state == VIO_DESC_FREE) { + break; + } + d->hdr.state = VIO_DESC_FREE; + } + return skb; +} + +static inline void vnet_free_skbs(struct sk_buff *skb) +{ + struct sk_buff *next; + + while (skb) { + next = skb->next; + skb->next = NULL; + dev_kfree_skb(skb); + skb = next; + } +} + +void sunvnet_clean_timer_expire_common(unsigned long port0) +{ + struct vnet_port *port = (struct vnet_port *)port0; + struct sk_buff *freeskbs; + unsigned pending; + + netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port)); + freeskbs = vnet_clean_tx_ring(port, &pending); + netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port)); + + vnet_free_skbs(freeskbs); + + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else + del_timer(&port->clean_timer); +} +EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); + +static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + int i, nc, err, blen; + + /* header */ + blen = skb_headlen(skb); + if (blen < ETH_ZLEN) + blen = ETH_ZLEN; + blen += VNET_PACKET_SKIP; + blen += 8 - (blen & 7); + + err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies, + ncookies, map_perm); + if (err < 0) + return err; + nc = err; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + if (nc < ncookies) { + vaddr = kmap_atomic(skb_frag_page(f)); + blen = skb_frag_size(f); + blen += 8 - (blen & 7); + err = ldc_map_single(lp, vaddr + f->page_offset, + blen, cookies + nc, ncookies - nc, + map_perm); + kunmap_atomic(vaddr); + } else { + err = -EMSGSIZE; + } + + if (err < 0) { + ldc_unmap(lp, cookies, nc); + return err; + } + nc += err; + } + return nc; +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) +{ + struct sk_buff *nskb; + int i, len, pad, docopy; + + len = skb->len; + pad = 0; + if (len < ETH_ZLEN) { + pad += ETH_ZLEN - skb->len; + len += pad; + } + len += VNET_PACKET_SKIP; + pad += 8 - (len & 7); + + /* make sure we have enough cookies and alignment in every frag */ + docopy = skb_shinfo(skb)->nr_frags >= 
ncookies; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + docopy |= f->page_offset & 7; + } + if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || + skb_tailroom(skb) < pad || + skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { + int start = 0, offset; + __wsum csum; + + len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + nskb = alloc_and_align_skb(skb->dev, len); + if (!nskb) { + dev_kfree_skb(skb); + return NULL; + } + skb_reserve(nskb, VNET_PACKET_SKIP); + + nskb->protocol = skb->protocol; + offset = skb_mac_header(skb) - skb->data; + skb_set_mac_header(nskb, offset); + offset = skb_network_header(skb) - skb->data; + skb_set_network_header(nskb, offset); + offset = skb_transport_header(skb) - skb->data; + skb_set_transport_header(nskb, offset); + + offset = 0; + nskb->csum_offset = skb->csum_offset; + nskb->ip_summed = skb->ip_summed; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + start = skb_checksum_start_offset(skb); + if (start) { + struct iphdr *iph = ip_hdr(nskb); + int offset = start + nskb->csum_offset; + + if (skb_copy_bits(skb, 0, nskb->data, start)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + *(__sum16 *)(skb->data + offset) = 0; + csum = skb_copy_and_csum_bits(skb, start, + nskb->data + start, + skb->len - start, 0); + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - start, + iph->protocol, csum); + } + *(__sum16 *)(nskb->data + offset) = csum; + + nskb->ip_summed = CHECKSUM_NONE; + } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + (void)skb_put(nskb, skb->len); + if (skb_is_gso(skb)) { + skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + } + nskb->queue_mapping = skb->queue_mapping; + dev_kfree_skb(skb); + skb = nskb; + } + return skb; +} + +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) +{ + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *segs; + int maclen, datalen; + int status; + int gso_size, gso_type, gso_segs; + int hlen = skb_transport_header(skb) - skb_mac_header(skb); + int proto = IPPROTO_IP; + + if (skb->protocol == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == htons(ETH_P_IPV6)) + proto = ipv6_hdr(skb)->nexthdr; + + if (proto == IPPROTO_TCP) { + hlen += tcp_hdr(skb)->doff * 4; + } else if (proto == IPPROTO_UDP) { + hlen += sizeof(struct udphdr); + } else { + pr_err("vnet_handle_offloads GSO with unknown transport " + "protocol %d tproto %d\n", skb->protocol, proto); + hlen = 128; /* XXX */ + } + datalen = port->tsolen - hlen; + + gso_size = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (port->tso && gso_size < datalen) + gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); + + if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, port->q_index); + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) + return NETDEV_TX_BUSY; + netif_tx_wake_queue(txq); + } + + maclen = skb_network_header(skb) - skb_mac_header(skb); + skb_pull(skb, maclen); + + if (port->tso && gso_size < 
datalen) { + if (skb_unclone(skb, GFP_ATOMIC)) + goto out_dropped; + + /* segment to TSO size */ + skb_shinfo(skb)->gso_size = datalen; + skb_shinfo(skb)->gso_segs = gso_segs; + } + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto out_dropped; + + skb_push(skb, maclen); + skb_reset_mac_header(skb); + + status = 0; + while (segs) { + struct sk_buff *curr = segs; + + segs = segs->next; + curr->next = NULL; + if (port->tso && curr->len > dev->mtu) { + skb_shinfo(curr)->gso_size = gso_size; + skb_shinfo(curr)->gso_type = gso_type; + skb_shinfo(curr)->gso_segs = + DIV_ROUND_UP(curr->len - hlen, gso_size); + } else { + skb_shinfo(curr)->gso_size = 0; + } + + skb_push(curr, maclen); + skb_reset_mac_header(curr); + memcpy(skb_mac_header(curr), skb_mac_header(skb), + maclen); + curr->csum_start = skb_transport_header(curr) - curr->head; + if (ip_hdr(curr)->protocol == IPPROTO_TCP) + curr->csum_offset = offsetof(struct tcphdr, check); + else if (ip_hdr(curr)->protocol == IPPROTO_UDP) + curr->csum_offset = offsetof(struct udphdr, check); + + if (!(status & NETDEV_TX_MASK)) + status = sunvnet_start_xmit_common(curr, dev, + vnet_tx_port); + if (status & NETDEV_TX_MASK) + dev_kfree_skb_any(curr); + } + + if (!(status & NETDEV_TX_MASK)) + dev_kfree_skb_any(skb); + return status; +out_dropped: + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) +{ + struct vnet_port *port = NULL; + struct vio_dring_state *dr; + struct vio_net_desc *d; + unsigned int len; + struct sk_buff *freeskbs = NULL; + int i, err, txi; + unsigned pending = 0; + struct netdev_queue *txq; + + rcu_read_lock(); + port = vnet_tx_port(skb, dev); + if (unlikely(!port)) { + rcu_read_unlock(); + goto out_dropped; + } + + if (skb_is_gso(skb) && skb->len > port->tsolen) { + err = vnet_handle_offloads(port, skb, vnet_tx_port); + rcu_read_unlock(); + return err; + } + + if (!skb_is_gso(skb) && skb->len > port->rmtu) { + unsigned long localmtu = port->rmtu - ETH_HLEN; + + if (vio_version_after_eq(&port->vio, 1, 3)) + localmtu -= VLAN_HLEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct flowi4 fl4; + struct rtable *rt = NULL; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = dev->ifindex; + fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); + fl4.daddr = ip_hdr(skb)->daddr; + fl4.saddr = ip_hdr(skb)->saddr; + + rt = ip_route_output_key(dev_net(dev), &fl4); + rcu_read_unlock(); + if (!IS_ERR(rt)) { + skb_dst_set(skb, &rt->dst); + icmp_send(skb, ICMP_DEST_UNREACH, + ICMP_FRAG_NEEDED, + htonl(localmtu)); + } + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); +#endif + goto out_dropped; + } + + skb = vnet_skb_shape(skb, 2); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vnet_fullcsum(skb); + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + i = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, i); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); + dev->stats.tx_errors++; + } + rcu_read_unlock(); + return NETDEV_TX_BUSY; + } + + d = vio_dring_cur(dr); + + txi = dr->prod; + + freeskbs = vnet_clean_tx_ring(port, &pending); + + BUG_ON(port->tx_bufs[txi].skb); + + len = skb->len; + if (len < ETH_ZLEN) + len = ETH_ZLEN; + + err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + if (err < 0) { + netdev_info(dev, "tx buffer map error %d\n", err); + goto out_dropped; + } + + port->tx_bufs[txi].skb = skb; + skb = NULL; + port->tx_bufs[txi].ncookies = err; + + /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), + * thus it is safe to not set VIO_ACK_ENABLE for each transmission: + * the protocol itself does not require it as long as the peer + * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. + * + * An ACK for every packet in the ring is expensive as the + * sending of LDC messages is slow and affects performance. + */ + d->hdr.ack = VIO_ACK_DISABLE; + d->size = len; + d->ncookies = port->tx_bufs[txi].ncookies; + for (i = 0; i < d->ncookies; i++) + d->cookies[i] = port->tx_bufs[txi].cookies[i]; + if (vio_version_after_eq(&port->vio, 1, 7)) { + struct vio_net_dext *dext = vio_net_ext(d); + + memset(dext, 0, sizeof(*dext)); + if (skb_is_gso(port->tx_bufs[txi].skb)) { + dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) + ->gso_size; + dext->flags |= VNET_PKT_IPV4_LSO; + } + if (vio_version_after_eq(&port->vio, 1, 8) && + !port->switch_port) { + dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; + dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; + } + } + + /* This has to be a non-SMP write barrier because we are writing + * to memory which is shared with the peer LDOM. + */ + dma_wmb(); + + d->hdr.state = VIO_DESC_READY; + + /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent + * to notify the consumer that some descriptors are READY. + * After that "start" trigger, no additional triggers are needed until + * a DRING_STOPPED is received from the consumer. The dr->cons field + * (set up by vnet_ack()) has the value of the next dring index + * that has not yet been ack-ed. We send a "start" trigger here + * if, and only if, start_cons is true (reset it afterward). Conversely, + * vnet_ack() should check if the dring corresponding to cons + * is marked READY, but start_cons was false. + * If so, vnet_ack() should send out the missed "start" trigger. + * + * Note that the dma_wmb() above makes sure the cookies et al. are + * not globally visible before the VIO_DESC_READY, and that the + * stores are ordered correctly by the compiler. The consumer will + * not proceed until the VIO_DESC_READY is visible assuring that + * the consumer does not observe anything related to descriptors + * out of order. 
The HV trap from the LDC start trigger is the + * producer to consumer announcement that work is available to the + * consumer + */ + if (!port->start_cons) { /* previous trigger suffices */ + trace_vnet_skip_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, dr->cons); + goto ldc_start_done; + } + + err = __vnet_tx_trigger(port, dr->cons); + if (unlikely(err < 0)) { + netdev_info(dev, "TX trigger error %d\n", err); + d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + dev->stats.tx_carrier_errors++; + goto out_dropped; + } + +ldc_start_done: + port->start_cons = false; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; + + dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) + netif_tx_wake_queue(txq); + } + + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); + rcu_read_unlock(); + + vnet_free_skbs(freeskbs); + + return NETDEV_TX_OK; + +out_dropped: + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else if (port) + del_timer(&port->clean_timer); + if (port) + rcu_read_unlock(); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); + +void sunvnet_tx_timeout_common(struct net_device *dev) +{ + /* XXX Implement me XXX */ +} +EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); + +int sunvnet_open_common(struct net_device *dev) +{ + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_open_common); + +int sunvnet_close_common(struct net_device *dev) +{ + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_close_common); + +static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) +{ + struct vnet_mcast_entry *m; + + for (m = vp->mcast_list; m; m = m->next) { + if (ether_addr_equal(m->addr, addr)) + return m; + } + return NULL; +} + +static void __update_mc_list(struct vnet *vp, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + struct vnet_mcast_entry *m; + + m = __vnet_mc_find(vp, ha->addr); + if (m) { + m->hit = 1; + continue; + } + + if (!m) { + m = kzalloc(sizeof(*m), GFP_ATOMIC); + if (!m) + continue; + memcpy(m->addr, ha->addr, ETH_ALEN); + m->hit = 1; + + m->next = vp->mcast_list; + vp->mcast_list = m; + } + } +} + +static void __send_mc_list(struct vnet *vp, struct vnet_port *port) +{ + struct vio_net_mcast_info info; + struct vnet_mcast_entry *m, **pp; + int n_addrs; + + memset(&info, 0, sizeof(info)); + + info.tag.type = VIO_TYPE_CTRL; + info.tag.stype = VIO_SUBTYPE_INFO; + info.tag.stype_env = VNET_MCAST_INFO; + info.tag.sid = vio_send_sid(&port->vio); + info.set = 1; + + n_addrs = 0; + for (m = vp->mcast_list; m; m = m->next) { + if (m->sent) + continue; + m->sent = 1; + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + + (void)vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + } + if (n_addrs) { + info.count = n_addrs; + (void)vio_ldc_send(&port->vio, &info, sizeof(info)); + } + + info.set = 0; + + n_addrs = 0; + pp = &vp->mcast_list; + while ((m = *pp) != NULL) { + if (m->hit) { + m->hit = 0; + pp = &m->next; + continue; + } + + memcpy(&info.mcast_addr[n_addrs 
* ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + (void)vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + + *pp = m->next; + kfree(m); + } + if (n_addrs) { + info.count = n_addrs; + (void)vio_ldc_send(&port->vio, &info, sizeof(info)); + } +} + +void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) +{ + struct vnet_port *port; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { + if (port->switch_port) { + __update_mc_list(vp, dev); + __send_mc_list(vp, port); + break; + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); + +int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > 65535) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common); + +int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); + +void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) +{ + struct vio_dring_state *dr; + int i; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + if (!dr->base) + return; + + for (i = 0; i < VNET_TX_RING_SIZE; i++) { + struct vio_net_desc *d; + void *skb = port->tx_bufs[i].skb; + + if (!skb) + continue; + + d = vio_dring_entry(dr, i); + + ldc_unmap(port->vio.lp, + port->tx_bufs[i].cookies, + port->tx_bufs[i].ncookies); + dev_kfree_skb(skb); + port->tx_bufs[i].skb = NULL; + d->hdr.state = VIO_DESC_FREE; + } + ldc_free_exp_dring(port->vio.lp, dr->base, + (dr->entry_size * dr->num_entries), + dr->cookies, dr->ncookies); + dr->base = NULL; + dr->entry_size = 0; + dr->num_entries = 0; + dr->pending = 0; + dr->ncookies = 0; +} +EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); + +static void vnet_port_reset(struct vnet_port *port) +{ + del_timer(&port->clean_timer); + sunvnet_port_free_tx_bufs_common(port); + port->rmtu = 0; + port->tso = true; + port->tsolen = 0; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port) +{ + struct vio_dring_state *dr; + unsigned long len, elen; + int i, err, ncookies; + void *dring; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + elen = sizeof(struct vio_net_desc) + + sizeof(struct ldc_trans_cookie) * 2; + if (vio_version_after_eq(&port->vio, 1, 7)) + elen += sizeof(struct vio_net_dext); + len = VNET_TX_RING_SIZE * elen; + + ncookies = VIO_MAX_RING_COOKIES; + dring = ldc_alloc_exp_dring(port->vio.lp, len, + dr->cookies, &ncookies, + (LDC_MAP_SHADOW | + LDC_MAP_DIRECT | + LDC_MAP_RW)); + if (IS_ERR(dring)) { + err = PTR_ERR(dring); + goto err_out; + } + + dr->base = dring; + dr->entry_size = elen; + dr->num_entries = VNET_TX_RING_SIZE; + dr->prod = 0; + dr->cons = 0; + port->start_cons = true; /* need an initial trigger */ + dr->pending = VNET_TX_RING_SIZE; + dr->ncookies = ncookies; + + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + d = vio_dring_entry(dr, i); + d->hdr.state = VIO_DESC_FREE; + } + return 0; + +err_out: + sunvnet_port_free_tx_bufs_common(port); + + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) +{ + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); + if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + napi_schedule(&port->napi); + } + spin_unlock_irqrestore(&vp->lock, flags); +} 
+EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); +#endif + +void sunvnet_port_add_txq_common(struct vnet_port *port) +{ + struct vnet *vp = port->vp; + int n; + + n = vp->nports++; + n = n & (VNET_MAX_TXQS - 1); + port->q_index = n; + netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index)); +} +EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); + +void sunvnet_port_rm_txq_common(struct vnet_port *port) +{ + port->vp->nports--; + netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index)); +} +EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h new file mode 100644 index 000000000000..bd36528af972 --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -0,0 +1,145 @@ +#ifndef _SUNVNETCOMMON_H +#define _SUNVNETCOMMON_H + +#include <linux/interrupt.h> + +/* length of time (or less) we expect pending descriptors to be marked + * as VIO_DESC_DONE and skbs ready to be freed + */ +#define VNET_CLEAN_TIMEOUT ((HZ / 100) + 1) + +#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) +#define VNET_TX_RING_SIZE 512 +#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) + +#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ +#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ + +/* VNET packets are sent in buffers with the first 6 bytes skipped + * so that after the ethernet header the IPv4/IPv6 headers are aligned + * properly. + */ +#define VNET_PACKET_SKIP 6 + +#define VNET_MAXCOOKIES (VNET_MAXPACKET / PAGE_SIZE + 1) + +#define VNET_MAX_TXQS 16 + +struct vnet_tx_entry { + struct sk_buff *skb; + unsigned int ncookies; + struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; +}; + +struct vnet; + +/* Structure to describe a vnet-port or vsw-port in the MD. + * If the vsw bit is set, this structure represents a vswitch + * port, and the net_device can be found from ->dev. If the + * vsw bit is not set, the net_device is available from ->vp->dev. + * See the VNET_PORT_TO_NET_DEVICE macro below. + */ +struct vnet_port { + struct vio_driver_state vio; + + struct hlist_node hash; + u8 raddr[ETH_ALEN]; + unsigned switch_port:1; + unsigned tso:1; + unsigned vsw:1; + unsigned __pad:13; + + struct vnet *vp; + struct net_device *dev; + + struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; + + struct list_head list; + + u32 stop_rx_idx; + bool stop_rx; + bool start_cons; + + struct timer_list clean_timer; + + u64 rmtu; + u16 tsolen; + + struct napi_struct napi; + u32 napi_stop_idx; + bool napi_resume; + int rx_event; + u16 q_index; +}; + +static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) +{ + return container_of(vio, struct vnet_port, vio); +} + +#define VNET_PORT_HASH_SIZE 16 +#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) + +static inline unsigned int vnet_hashfn(u8 *mac) +{ + unsigned int val = mac[4] ^ mac[5]; + + return val & (VNET_PORT_HASH_MASK); +} + +struct vnet_mcast_entry { + u8 addr[ETH_ALEN]; + u8 sent; + u8 hit; + struct vnet_mcast_entry *next; +}; + +struct vnet { + /* Protects port_list and port_hash. 
*/ + spinlock_t lock; + + struct net_device *dev; + + u32 msg_enable; + + struct list_head port_list; + + struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct vnet_mcast_entry *mcast_list; + + struct list_head list; + u64 local_mac; + + int nports; +}; + +/* Def used by common code to get the net_device from the proper location */ +#define VNET_PORT_TO_NET_DEVICE(__port) \ + ((__port)->vsw ? (__port)->dev : (__port)->vp->dev) + +/* Common funcs */ +void sunvnet_clean_timer_expire_common(unsigned long port0); +int sunvnet_open_common(struct net_device *dev); +int sunvnet_close_common(struct net_device *dev); +void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); +int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); +void sunvnet_tx_timeout_common(struct net_device *dev); +int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu); +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)); +#ifdef CONFIG_NET_POLL_CONTROLLER +void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp); +#endif +void sunvnet_event_common(void *arg, int event); +int sunvnet_send_attr_common(struct vio_driver_state *vio); +int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg); +void sunvnet_handshake_complete_common(struct vio_driver_state *vio); +int sunvnet_poll_common(struct napi_struct *napi, int budget); +void sunvnet_port_free_tx_bufs_common(struct vnet_port *port); +bool sunvnet_port_is_up_common(struct vnet_port *vnet); +void sunvnet_port_add_txq_common(struct vnet_port *port); +void sunvnet_port_rm_txq_common(struct vnet_port *port); + +#endif /* _SUNVNETCOMMON_H */ diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index fc8bbff2d7e3..af11ed1e0bcc 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -426,7 +426,7 @@ #define DWC_MMC_RXOCTETCOUNT_GB 0x0784 #define DWC_MMC_RXPACKETCOUNT_GB 0x0780 -static int debug = 3; +static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); @@ -650,6 +650,11 @@ struct net_local { u32 mmc_tx_counters_mask; struct dwceqos_flowcontrol flowcontrol; + + /* Tracks the intermediate state of phy started but hardware + * init not finished yet. 
+ */ + bool phy_defer; }; static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, @@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev) struct phy_device *phydev = lp->phy_dev; int status_change = 0; + if (lp->phy_defer) + return; + if (phydev->link) { if ((lp->speed != phydev->speed) || (lp->duplex != phydev->duplex)) { @@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) /* Allocate DMA descriptors */ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, 0); + &lp->rx_descs_addr, GFP_KERNEL); if (!lp->rx_descs) goto err_out; lp->rx_descs_tail_addr = lp->rx_descs_addr + @@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, 0); + &lp->tx_descs_addr, GFP_KERNEL); if (!lp->tx_descs) goto err_out; lp->tx_descs_tail_addr = lp->tx_descs_addr + @@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp) regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); + + lp->phy_defer = false; + mutex_lock(&lp->phy_dev->lock); + phy_read_status(lp->phy_dev); + dwceqos_adjust_link(lp->ndev); + mutex_unlock(&lp->phy_dev->lock); } static void dwceqos_tx_reclaim(unsigned long data) @@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev) } netdev_reset_queue(ndev); + /* The dwceqos reset state machine requires all phy clocks to complete, + * hence the unusual init order with phy_start first. + */ + lp->phy_defer = true; + phy_start(lp->phy_dev); dwceqos_init_hw(lp); napi_enable(&lp->napi); - phy_start(lp->phy_dev); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); @@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); - phy_stop(lp->phy_dev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - netif_stop_queue(ndev); napi_disable(&lp->napi); - dwceqos_drain_dma(lp); + /* Stop all tx before we drain the tx dma. */ + netif_tx_lock_bh(lp->ndev); + netif_stop_queue(ndev); + netif_tx_unlock_bh(lp->ndev); - netif_tx_lock(lp->ndev); + dwceqos_drain_dma(lp); dwceqos_reset_hw(lp); + phy_stop(lp->phy_dev); + dwceqos_descriptor_free(lp); - netif_tx_unlock(lp->ndev); return 0; } @@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) ((trans.initial_descriptor + trans.nr_descriptors) % DWCEQOS_TX_DCNT)); - dwceqos_tx_finalize(skb, lp, &trans); - - netdev_sent_queue(ndev, skb->len); - spin_lock_bh(&lp->tx_lock); lp->tx_free -= trans.nr_descriptors; + dwceqos_tx_finalize(skb, lp, &trans); + netdev_sent_queue(ndev, skb->len); spin_unlock_bh(&lp->tx_lock); ndev->trans_start = jiffies; diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index e9cc61e1ec74..c3e85acfdc70 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, mode = AM33XX_GMII_SEL_MODE_RGMII; break; - case PHY_INTERFACE_MODE_MII: default: + dev_warn(priv->dev, + "Unsupported PHY mode: \"%s\". 
Defaulting to MII.\n", + phy_modes(phy_mode)); + /* fallthrough */ + case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; }; @@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, mode = AM33XX_GMII_SEL_MODE_RGMII; break; - case PHY_INTERFACE_MODE_MII: default: + dev_warn(priv->dev, + "Unsupported PHY mode: \"%s\". Defaulting to MII.\n", + phy_modes(phy_mode)); + /* fallthrough */ + case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; }; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c61d66d38634..1d0942c53120 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, *ndesc = le32_to_cpu(desc->next_desc); } -static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) +static u32 get_sw_data(int index, struct knav_dma_desc *desc) { - *pad0 = le32_to_cpu(desc->pad[0]); - *pad1 = le32_to_cpu(desc->pad[1]); - *pad2 = le32_to_cpu(desc->pad[2]); + /* No Endian conversion needed as this data is untouched by hw */ + return desc->sw_data[index]; } -static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) -{ - u64 pad64; - - pad64 = le32_to_cpu(desc->pad[0]) + - ((u64)le32_to_cpu(desc->pad[1]) << 32); - *padptr = (void *)(uintptr_t)pad64; -} +/* use these macros to get sw data */ +#define GET_SW_DATA0(desc) get_sw_data(0, desc) +#define GET_SW_DATA1(desc) get_sw_data(1, desc) +#define GET_SW_DATA2(desc) get_sw_data(2, desc) +#define GET_SW_DATA3(desc) get_sw_data(3, desc) static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, struct knav_dma_desc *desc) @@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info, desc->packet_info = cpu_to_le32(pkt_info); } -static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) +static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc) { - desc->pad[0] = cpu_to_le32(pad0); - desc->pad[1] = cpu_to_le32(pad1); - desc->pad[2] = cpu_to_le32(pad1); + /* No Endian conversion needed as this data is untouched by hw */ + desc->sw_data[index] = data; } +/* use these macros to set sw data */ +#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc) +#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc) +#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc) +#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc) + static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, struct knav_dma_desc *desc) { @@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, dma_addr_t dma_desc, dma_buf; unsigned int buf_len, dma_sz = sizeof(*ndesc); void *buf_ptr; - u32 pad[2]; u32 tmp; get_words(&dma_desc, 1, &desc->next_desc); @@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, break; } get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); - get_pad_ptr(&buf_ptr, ndesc); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + buf_ptr = (void *)GET_SW_DATA0(ndesc); + buf_len = (int)GET_SW_DATA1(desc); dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); __free_page(buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); } - - get_pad_info(&pad[0], &pad[1], &buf_len, desc); - buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + /* warning!!!! 
We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + buf_ptr = (void *)GET_SW_DATA0(desc); + buf_len = (int)GET_SW_DATA1(desc); if (buf_ptr) netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); @@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dma_addr_t dma_desc, dma_buff; struct netcp_packet p_info; struct sk_buff *skb; - u32 pad[2]; void *org_buf_ptr; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); @@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); - get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); - org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + org_buf_ptr = (void *)GET_SW_DATA0(desc); + org_buf_len = (int)GET_SW_DATA1(desc); if (unlikely(!org_buf_ptr)) { dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); @@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) /* Fill in the page fragment list */ while (dma_desc) { struct page *page; - void *ptr; ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) { @@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); - get_pad_ptr(&ptr, ndesc); - page = ptr; + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + page = (struct page *)GET_SW_DATA0(desc); if (likely(dma_buff && buf_len && page)) { dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, @@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) } get_org_pkt_info(&dma, &buf_len, desc); - get_pad_ptr(&buf_ptr, desc); + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + buf_ptr = (void *)GET_SW_DATA0(desc); if (unlikely(!dma)) { dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); @@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) struct page *page; dma_addr_t dma; void *bufptr; - u32 pad[3]; + u32 sw_data[2]; /* Allocate descriptor */ hwdesc = knav_pool_desc_get(netcp->rx_pool); @@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bufptr = netdev_alloc_frag(primary_buf_len); - pad[2] = primary_buf_len; + sw_data[1] = primary_buf_len; if (unlikely(!bufptr)) { dev_warn_ratelimited(netcp->ndev_dev, @@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (unlikely(dma_mapping_error(netcp->dev, dma))) goto fail; - pad[0] = lower_32_bits((uintptr_t)bufptr); - pad[1] = upper_32_bits((uintptr_t)bufptr); - + /* warning!!!! We are saving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + sw_data[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); @@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) } buf_len = PAGE_SIZE; dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); - pad[0] = lower_32_bits(dma); - pad[1] = upper_32_bits(dma); - pad[2] = 0; + /* warning!!!! 
We are saving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + sw_data[0] = (u32)page; + sw_data[1] = 0; } desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; @@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT; set_org_pkt_info(dma, buf_len, hwdesc); - set_pad_info(pad[0], pad[1], pad[2], hwdesc); + SET_SW_DATA0(sw_data[0], hwdesc); + SET_SW_DATA1(sw_data[1], hwdesc); set_desc_info(desc_info, pkt_info, hwdesc); /* Push to FDQs */ @@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { struct knav_dma_desc *desc; - void *ptr; struct sk_buff *skb; unsigned int dma_sz; dma_addr_t dma; @@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, continue; } - get_pad_ptr(&ptr, desc); - skb = ptr; + /* warning!!!! We are retrieving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + skb = (struct sk_buff *)GET_SW_DATA0(desc); netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); @@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, } set_words(&tmp, 1, &desc->packet_info); - tmp = lower_32_bits((uintptr_t)&skb); - set_words(&tmp, 1, &desc->pad[0]); - tmp = upper_32_bits((uintptr_t)&skb); - set_words(&tmp, 1, &desc->pad[1]); + /* warning!!!! We are saving the virtual ptr in the sw_data + * field as a 32bit value. Will not work on 64bit machines + */ + SET_SW_DATA0((u32)skb, desc); if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { tmp = tx_pipe->switch_to_port; @@ -1835,22 +1852,26 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u8 num_tc) +static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || - (dev->real_num_tx_queues < num_tc)) + (dev->real_num_tx_queues < tc->tc)) return -EINVAL; /* Configure traffic class to queue mappings */ - if (num_tc) { - netdev_set_num_tc(dev, num_tc); - for (i = 0; i < num_tc; i++) + if (tc->tc) { + netdev_set_num_tc(dev, tc->tc); + for (i = 0; i < tc->tc; i++) netdev_set_tc_queue(dev, i, 1, i); } else { netdev_reset_tc(dev); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3c54a2cae5df..67610270d171 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -48,7 +48,6 @@ #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/bitops.h> -#include <asm/pci-bridge.h> #include <net/checksum.h> #include "spider_net.h" |
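For reference, a minimal sketch (not taken from this patch) of the mqprio-only ndo_setup_tc callback shape that the netcp_setup_tc() conversion above follows under the tc_to_netdev interface; the function name sketch_setup_tc is illustrative only, while tc->type, tc->tc and the netdev_set_num_tc()/netdev_set_tc_queue()/netdev_reset_tc() helpers are the ones visible in the hunk.

static int sketch_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
{
	int i;

	/* Only the MQPRIO offload type is handled; anything else is rejected. */
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* tc->tc == 0 means "remove the traffic-class mapping". */
	if (!tc->tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* Refuse more traffic classes than real tx queues. */
	if (dev->real_num_tx_queues < tc->tc)
		return -EINVAL;

	/* Map each traffic class to one dedicated tx queue. */
	netdev_set_num_tc(dev, tc->tc);
	for (i = 0; i < tc->tc; i++)
		netdev_set_tc_queue(dev, i, 1, i);

	return 0;
}

The actual netcp_setup_tc() above additionally rejects devices with a single real tx queue (dev->real_num_tx_queues <= 1) before applying the mapping.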