Diffstat (limited to 'drivers/net/ethernet')
361 files changed, 17937 insertions, 9207 deletions
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
 	link->open++;
 
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ax_open(dev);
 } /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
 	info->phy_id = info->eth_phy;
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ei_open(dev);
 } /* pcnet_open */
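The two hunks above replace the open-coded init_timer()/add_timer() sequence with the setup_timer() and mod_timer() helpers. A minimal sketch of the idiom under the pre-4.15 timer API (my_* names are hypothetical; the callback takes an unsigned long cookie):

	static struct timer_list my_timer;

	static void my_watchdog(unsigned long data)
	{
		struct net_device *dev = (struct net_device *)data;

		if (!netif_carrier_ok(dev))
			; /* ... poll the PHY, update link state ... */

		mod_timer(&my_timer, jiffies + HZ);	/* re-arm, 1s from now */
	}

	static void my_watchdog_start(struct net_device *dev)
	{
		/* one call binds callback and cookie; replaces init_timer()
		 * plus the .function/.data assignments */
		setup_timer(&my_timer, my_watchdog, (unsigned long)dev);
		/* sets the expiry and starts the timer; replaces the
		 * .expires assignment plus add_timer() */
		mod_timer(&my_timer, jiffies + HZ);
	}

mod_timer() activates an inactive timer, which is why it can stand in for add_timer() here.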
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ec20611e9de2..096531a73124 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -983,10 +983,9 @@ static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }
 
-static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
 	u64 ns;
-	u32 remainder;
 	unsigned long flags;
 	struct bfin_mac_local *lp =
 		container_of(ptp, struct bfin_mac_local, caps);
@@ -997,21 +996,20 @@ static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 
 	spin_unlock_irqrestore(&lp->phc_lock, flags);
 
-	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-	ts->tv_nsec = remainder;
+	*ts = ns_to_timespec64(ns);
+
 	return 0;
 }
 
 static int bfin_ptp_settime(struct ptp_clock_info *ptp,
-			    const struct timespec *ts)
+			    const struct timespec64 *ts)
 {
 	u64 ns;
 	unsigned long flags;
 	struct bfin_mac_local *lp =
 		container_of(ptp, struct bfin_mac_local, caps);
 
-	ns = ts->tv_sec * 1000000000ULL;
-	ns += ts->tv_nsec;
+	ns = timespec64_to_ns(ts);
 
 	spin_lock_irqsave(&lp->phc_lock, flags);
 
@@ -1039,8 +1037,8 @@ static struct ptp_clock_info bfin_ptp_caps = {
 	.pps		= 0,
 	.adjfreq	= bfin_ptp_adjfreq,
 	.adjtime	= bfin_ptp_adjtime,
-	.gettime	= bfin_ptp_gettime,
-	.settime	= bfin_ptp_settime,
+	.gettime64	= bfin_ptp_gettime,
+	.settime64	= bfin_ptp_settime,
 	.enable		= bfin_ptp_enable,
 };
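This PTP conversion moves the clock callbacks from struct timespec to the 64-bit-safe struct timespec64, wired up through the .gettime64/.settime64 members of struct ptp_clock_info, so the seconds value survives the year-2038 rollover on 32-bit systems. A sketch of the pattern, assuming hypothetical counter accessors:

	static int my_gettime64(struct ptp_clock_info *info,
				struct timespec64 *ts)
	{
		u64 ns = my_read_counter_ns(info);	/* hypothetical hw read */

		*ts = ns_to_timespec64(ns);	/* replaces the div_u64_rem() split */
		return 0;
	}

	static int my_settime64(struct ptp_clock_info *info,
				const struct timespec64 *ts)
	{
		/* replaces tv_sec * 1000000000ULL + tv_nsec */
		u64 ns = timespec64_to_ns(ts);

		my_write_counter_ns(info, ns);	/* hypothetical hw write */
		return 0;
	}

	static struct ptp_clock_info my_caps = {
		.gettime64 = my_gettime64,
		.settime64 = my_settime64,
	};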
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 2b8bfeeee9cf..ae89de7deb13 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1588,7 +1588,7 @@ static int greth_of_remove(struct platform_device *of_dev)
 	return 0;
 }
 
-static struct of_device_id greth_of_match[] = {
+static const struct of_device_id greth_of_match[] = {
 	{
 	 .name = "GAISLER_ETHMAC",
 	},
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index f3470d96837a..bab01c849165 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -757,7 +757,7 @@ static void emac_shutdown(struct net_device *dev)
 	/* Disable all interrupt */
 	writel(0, db->membase + EMAC_INT_CTL_REG);
 
-	/* clear interupt status */
+	/* clear interrupt status */
 	reg_val = readl(db->membase + EMAC_INT_STA_REG);
 	writel(reg_val, db->membase + EMAC_INT_STA_REG);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index e335626e1b6b..eba070f16782 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -72,7 +72,6 @@ struct msgdma_extended_desc {
 #define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
 					 MSGDMA_DESC_CTL_GEN_EOP |	\
 					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
-					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
 					 MSGDMA_DESC_CTL_GO)
 
 #define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..90a76306ad0f 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
 #define TXQUEUESTOP_THRESHHOLD	2
 
-static struct of_device_id altera_tse_ids[];
+static const struct of_device_id altera_tse_ids[];
 
 static inline u32 tse_tx_avail(struct altera_tse_private *priv)
 {
@@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 	/* set MDIO address */
 	csrwr32((mii_id & 0x1f), priv->mac_dev,
-		tse_csroffs(mdio_phy0_addr));
+		tse_csroffs(mdio_phy1_addr));
 
 	/* get the data */
 	return csrrd32(priv->mac_dev,
-		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
+		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
@@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 	/* set MDIO address */
 	csrwr32((mii_id & 0x1f), priv->mac_dev,
-		tse_csroffs(mdio_phy0_addr));
+		tse_csroffs(mdio_phy1_addr));
 
 	/* write the data */
-	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
+	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
 	return 0;
 }
@@ -376,7 +376,13 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 	u16 pktlength;
 	u16 pktstatus;
 
-	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+	/* Check for count < limit first as get_rx_status is changing
+	 * the response-fifo so we must process the next packet
+	 * after calling get_rx_status if a response is pending.
+	 * (reading the last byte of the response pops the value from the fifo.)
+	 */
+	while ((count < limit) &&
+	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
@@ -491,28 +497,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
 	struct altera_tse_private *priv =
 		container_of(napi, struct altera_tse_private, napi);
 	int rxcomplete = 0;
-	int txcomplete = 0;
 	unsigned long int flags;
 
-	txcomplete = tse_tx_complete(priv);
+	tse_tx_complete(priv);
 
 	rxcomplete = tse_rx(priv, budget);
 
-	if (rxcomplete >= budget || txcomplete > 0)
-		return rxcomplete;
+	if (rxcomplete < budget) {
 
-	napi_gro_flush(napi, false);
-	__napi_complete(napi);
+		napi_gro_flush(napi, false);
+		__napi_complete(napi);
 
-	netdev_dbg(priv->dev,
-		   "NAPI Complete, did %d packets with budget %d\n",
-		   txcomplete+rxcomplete, budget);
+		netdev_dbg(priv->dev,
+			   "NAPI Complete, did %d packets with budget %d\n",
+			   rxcomplete, budget);
 
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-	priv->dmaops->enable_rxirq(priv);
-	priv->dmaops->enable_txirq(priv);
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-	return rxcomplete + txcomplete;
+		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+		priv->dmaops->enable_rxirq(priv);
+		priv->dmaops->enable_txirq(priv);
+		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+	}
+	return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +526,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct altera_tse_private *priv;
-	unsigned long int flags;
 
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +533,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	}
 	priv = netdev_priv(dev);
 
-	/* turn off desc irqs and enable napi rx */
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+	spin_lock(&priv->rxdma_irq_lock);
+	/* reset IRQs */
+	priv->dmaops->clear_rxirq(priv);
+	priv->dmaops->clear_txirq(priv);
+	spin_unlock(&priv->rxdma_irq_lock);
 
 	if (likely(napi_schedule_prep(&priv->napi))) {
+		spin_lock(&priv->rxdma_irq_lock);
 		priv->dmaops->disable_rxirq(priv);
 		priv->dmaops->disable_txirq(priv);
+		spin_unlock(&priv->rxdma_irq_lock);
 		__napi_schedule(&priv->napi);
 	}
 
-	/* reset IRQs */
-	priv->dmaops->clear_rxirq(priv);
-	priv->dmaops->clear_txirq(priv);
-
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
 	return IRQ_HANDLED;
 }
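The altera_isr()/tse_poll() rework above follows the canonical NAPI split: the interrupt handler acks and masks device interrupts and schedules the poller, and the poller only completes NAPI and unmasks interrupts when it consumed less than its budget. A skeletal sketch of that contract (my_* names are illustrative placeholders):

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct my_priv *priv = dev_id;

		my_ack_irqs(priv);			/* hypothetical: clear causes */
		if (napi_schedule_prep(&priv->napi)) {
			my_disable_irqs(priv);		/* hypothetical */
			__napi_schedule(&priv->napi);
		}
		return IRQ_HANDLED;
	}

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int done = my_clean_rx(priv, budget);	/* hypothetical */

		if (done < budget) {
			/* ring drained: back to interrupt-driven mode */
			napi_complete(napi);
			my_enable_irqs(priv);		/* hypothetical */
		}
		return done;	/* done == budget keeps NAPI scheduled */
	}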
@@ -1099,8 +1103,12 @@ static int tse_open(struct net_device *dev)
 	spin_lock(&priv->mac_cfg_lock);
 	ret = reset_mac(priv);
 
+	/* Note that reset_mac will fail if the clocks are gated by the PHY
+	 * due to the PHY being put into isolation or power down mode.
+	 * This is not an error if reset fails due to no clock.
+	 */
 	if (ret)
-		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
 
 	ret = init_mac(priv);
 	spin_unlock(&priv->mac_cfg_lock);
@@ -1204,8 +1212,12 @@ static int tse_shutdown(struct net_device *dev)
 	spin_lock(&priv->tx_lock);
 	ret = reset_mac(priv);
 
+	/* Note that reset_mac will fail if the clocks are gated by the PHY
+	 * due to the PHY being put into isolation or power down mode.
+	 * This is not an error if reset fails due to no clock.
+	 */
 	if (ret)
-		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
 
 	priv->dmaops->reset_dma(priv);
 	free_skbufs(dev);
@@ -1399,7 +1411,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-				 &priv->rx_fifo_depth)) {
+				 &priv->tx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
 		goto err_free_netdev;
@@ -1569,7 +1581,7 @@ static const struct altera_dmaops altera_dtype_msgdma = {
 	.start_rxdma = msgdma_start_rxdma,
 };
 
-static struct of_device_id altera_tse_ids[] = {
+static const struct of_device_id altera_tse_ids[] = {
 	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
 	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
 	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 4c2ae2221780..94960055fa1f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -723,13 +723,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 		 * the last correctly noting the error.
 		 */
 		if(status & ERR_BIT) {
-			/* reseting flags */
+			/* resetting flags */
 			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 			goto err_next_pkt;
 		}
 		/* check for STP and ENP */
 		if(!((status & STP_BIT) && (status & ENP_BIT))){
-			/* reseting flags */
+			/* resetting flags */
 			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 			goto err_next_pkt;
 		}
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index a75092d584cc..7cdb18512407 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -614,7 +614,7 @@ typedef enum {
 /* Assume contoller gets data 10 times the maximum processing time */
 #define  REPEAT_CNT	10
 
-/* amd8111e decriptor flag definitions */
+/* amd8111e descriptor flag definitions */
 typedef enum {
 
 	OWN_BIT		=	(1 << 15),
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..bc8b04f42882 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 {
 	struct pcnet32_private *lp;
 	int i, media;
-	int fdx, mii, fset, dxsuflo;
+	int fdx, mii, fset, dxsuflo, sram;
 	int chip_version;
 	char *chipname;
 	struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	}
 
 	/* initialize variables */
-	fdx = mii = fset = dxsuflo = 0;
+	fdx = mii = fset = dxsuflo = sram = 0;
 	chip_version = (chip_version >> 12) & 0xffff;
 
 	switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		chipname = "PCnet/FAST III 79C973";	/* PCI */
 		fdx = 1;
 		mii = 1;
+		sram = 1;
 		break;
 	case 0x2626:
 		chipname = "PCnet/Home 79C978";	/* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		chipname = "PCnet/FAST III 79C975";	/* PCI */
 		fdx = 1;
 		mii = 1;
+		sram = 1;
 		break;
 	case 0x2628:
 		chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 			dxsuflo = 1;
 	}
 
+	/*
+	 * The Am79C973/Am79C975 controllers come with 12K of SRAM
+	 * which we can use for the Tx/Rx buffers but most importantly,
+	 * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
+	 * Tx fifo underflows.
+	 */
+	if (sram) {
+		/*
+		 * The SRAM is being configured in two steps. First we
+		 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+		 * to the datasheet, each bit corresponds to a 512-byte
+		 * page so we can have at most 24 pages. The SRAM_SIZE
+		 * holds the value of the upper 8 bits of the 16-bit SRAM size.
+		 * The low 8-bits start at 0x00 and end at 0xff. So the
+		 * address range is from 0x0000 up to 0x17ff. Therefore,
+		 * the SRAM_SIZE is set to 0x17. The next step is to set
+		 * the BCR26:SRAM_BND midway through so the Tx and Rx
+		 * buffers can share the SRAM equally.
+		 */
+		a->write_bcr(ioaddr, 25, 0x17);
+		a->write_bcr(ioaddr, 26, 0xc);
+		/* And finally enable the NOUFLO bit */
+		a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+	}
+
 	dev = alloc_etherdev(sizeof(*lp));
 	if (!dev) {
 		ret = -ENOMEM;
@@ -1708,7 +1735,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
 	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
 	if (!is_valid_ether_addr(dev->dev_addr))
-		memset(dev->dev_addr, 0, ETH_ALEN);
+		eth_zero_addr(dev->dev_addr);
 
 	if (pcnet32_debug & NETIF_MSG_PROBE) {
 		pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 29a09271b64a..34c28aac767f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -365,6 +365,8 @@
 #define MAC_HWF0R_TXCOESEL_WIDTH	1
 #define MAC_HWF0R_VLHASH_INDEX		4
 #define MAC_HWF0R_VLHASH_WIDTH		1
+#define MAC_HWF1R_ADDR64_INDEX		14
+#define MAC_HWF1R_ADDR64_WIDTH		2
 #define MAC_HWF1R_ADVTHWORD_INDEX	13
 #define MAC_HWF1R_ADVTHWORD_WIDTH	1
 #define MAC_HWF1R_DBGMEMA_INDEX		19
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 400757b49872..21d9497518fd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -853,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
 	return 0;
 }
 
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	unsigned int pr_mode, am_mode;
+
+	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+	xgbe_set_promiscuous_mode(pdata, pr_mode);
+	xgbe_set_all_multicast_mode(pdata, am_mode);
+
+	xgbe_add_mac_addresses(pdata);
+
+	return 0;
+}
+
 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 			      int mmd_reg)
 {
@@ -1068,7 +1084,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc3 = 0;
 
 	/* Make sure ownership is written to the descriptor */
-	wmb();
+	dma_wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
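The wmb() -> dma_wmb() conversions through these xgbe hunks switch to the lighter barrier that is sufficient for ordering writes to coherent DMA memory such as descriptor rings; a full wmb() is still used before MMIO doorbell writes (see xgbe_tx_start_xmit() further down). The ownership pattern, sketched against a hypothetical descriptor layout:

	/* illustrative publish sequence, not xgbe code */
	desc->addr_lo = cpu_to_le32(lower_32_bits(dma_addr));
	desc->addr_hi = cpu_to_le32(upper_32_bits(dma_addr));
	desc->len = cpu_to_le32(len);

	/* all fields must reach memory before the device can see OWN */
	dma_wmb();

	desc->ctrl = cpu_to_le32(MY_DESC_OWN);	/* hand descriptor to hw */

	/* on the consuming side, after observing !OWN: */
	dma_rmb();	/* order the OWN check before reading other fields */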
@@ -1101,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	DBGPR("<--tx_desc_init\n");
 }
 
-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+			       struct xgbe_ring_data *rdata, unsigned int index)
 {
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
+	unsigned int rx_usecs = pdata->rx_usecs;
+	unsigned int rx_frames = pdata->rx_frames;
+	unsigned int inte;
+
+	if (!rx_usecs && !rx_frames) {
+		/* No coalescing, interrupt for every descriptor */
+		inte = 1;
+	} else {
+		/* Set interrupt based on Rx frame coalescing setting */
+		if (rx_frames && !((index + 1) % rx_frames))
+			inte = 1;
+		else
+			inte = 0;
+	}
 
 	/* Reset the Rx descriptor
 	 *   Set buffer 1 (lo) address to header dma address (lo)
@@ -1117,19 +1148,18 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
 	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
 
-	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-			  rdata->interrupt ? 1 : 0);
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
 	 * for the descriptor
 	 */
-	wmb();
+	dma_wmb();
 
 	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
 
 	/* Make sure ownership is written to the descriptor */
-	wmb();
+	dma_wmb();
 }
 
 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1138,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	unsigned int start_index = ring->cur;
-	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
 
 	DBGPR("-->rx_desc_init\n");
 
-	rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
-	rx_frames = pdata->rx_frames;
-
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
 
-		/* Set interrupt on completion bit as appropriate */
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
-			rdata->interrupt = 0;
-		else
-			rdata->interrupt = 1;
-
 		/* Initialize Rx descriptor */
-		xgbe_rx_desc_reset(rdata);
+		xgbe_rx_desc_reset(pdata, rdata, i);
 	}
 
 	/* Update the total number of Rx descriptors */
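With the interrupt decision moved into xgbe_rx_desc_reset(), the INTE bit is recomputed from the live coalescing settings every time a descriptor is recycled, which is what lets rx-frames be changed at runtime (see the ethtool hunks below). A worked example of the (index + 1) % rx_frames test: with rx_frames = 16, descriptors 15, 31, 47, ... request an interrupt, i.e. one interrupt per 16 received frames; with rx_usecs and rx_frames both zero, every descriptor interrupts. Restated standalone:

	/* illustrative restatement of the INTE selection above */
	static unsigned int rx_desc_wants_irq(unsigned int index,
					      unsigned int rx_usecs,
					      unsigned int rx_frames)
	{
		if (!rx_usecs && !rx_frames)
			return 1;	/* no coalescing configured at all */
		if (rx_frames && !((index + 1) % rx_frames))
			return 1;	/* every rx_frames-th descriptor */
		return 0;		/* otherwise rely on the RIWT timer */
	}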
@@ -1358,18 +1378,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring_data *rdata;
 
+	/* Make sure everything is written before the register write */
+	wmb();
+
 	/* Issue a poll command to Tx DMA by writing address
 	 * of next immediate free descriptor
 	 */
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
 			  lower_32_bits(rdata->rdesc_dma));
 
-	/* Start the Tx coalescing timer */
+	/* Start the Tx timer */
 	if (pdata->tx_usecs && !channel->tx_timer_active) {
 		channel->tx_timer_active = 1;
-		hrtimer_start(&channel->tx_timer,
-			      ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
-			      HRTIMER_MODE_REL);
+		mod_timer(&channel->tx_timer,
+			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
 	}
 
 	ring->tx.xmit_more = 0;
@@ -1565,7 +1587,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	 * is written to the descriptor(s) before setting the OWN bit
 	 * for the first descriptor
 	 */
-	wmb();
+	dma_wmb();
 
 	/* Set OWN bit for the first descriptor */
 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
@@ -1577,7 +1599,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 #endif
 
 	/* Make sure ownership is written to the descriptor */
-	wmb();
+	dma_wmb();
 
 	ring->cur = cur_index + 1;
 	if (!packet->skb->xmit_more ||
@@ -1613,7 +1635,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 		return 1;
 
 	/* Make sure descriptor fields are read after reading the OWN bit */
-	rmb();
+	dma_rmb();
 
 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
 	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
@@ -2004,7 +2026,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-	netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+	netdev_notice(pdata->netdev,
+		      "%d Tx hardware queues, %d byte fifo per queue\n",
 		      pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
@@ -2019,7 +2042,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-	netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+	netdev_notice(pdata->netdev,
+		      "%d Rx hardware queues, %d byte fifo per queue\n",
 		      pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
@@ -2800,6 +2824,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	 * Initialize MAC related features
 	 */
 	xgbe_config_mac_address(pdata);
+	xgbe_config_rx_mode(pdata);
 	xgbe_config_jumbo_enable(pdata);
 	xgbe_config_flow_control(pdata);
 	xgbe_config_mac_speed(pdata);
@@ -2819,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
 	hw_if->tx_complete = xgbe_tx_complete;
 
-	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
-	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
 	hw_if->set_mac_address = xgbe_set_mac_address;
+	hw_if->config_rx_mode = xgbe_config_rx_mode;
 
 	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
 	hw_if->disable_rx_csum = xgbe_disable_rx_csum;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..db84ddcfec84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -129,7 +129,6 @@
 static int xgbe_one_poll(struct napi_struct *, int);
 static int xgbe_all_poll(struct napi_struct *, int);
-static void xgbe_set_rx_mode(struct net_device *);
 
 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 {
@@ -411,11 +410,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+static void xgbe_tx_timer(unsigned long data)
 {
-	struct xgbe_channel *channel = container_of(timer,
-						    struct xgbe_channel,
-						    tx_timer);
+	struct xgbe_channel *channel = (struct xgbe_channel *)data;
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct napi_struct *napi;
 
@@ -437,8 +434,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 	channel->tx_timer_active = 0;
 
 	DBGPR("<--xgbe_tx_timer\n");
-
-	return HRTIMER_NORESTART;
 }
 
 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
@@ -454,9 +449,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
 			break;
 
 		DBGPR("  %s adding tx timer\n", channel->name);
-		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
-			     HRTIMER_MODE_REL);
-		channel->tx_timer.function = xgbe_tx_timer;
+		setup_timer(&channel->tx_timer, xgbe_tx_timer,
+			    (unsigned long)channel);
 	}
 
 	DBGPR("<--xgbe_init_tx_timers\n");
@@ -475,8 +469,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
 			break;
 
 		DBGPR("  %s deleting tx timer\n", channel->name);
-		channel->tx_timer_active = 0;
-		hrtimer_cancel(&channel->tx_timer);
+		del_timer_sync(&channel->tx_timer);
 	}
 
 	DBGPR("<--xgbe_stop_tx_timers\n");
@@ -519,6 +512,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 						      RXFIFOSIZE);
 	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
 						TXFIFOSIZE);
+	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
 	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
 	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
 	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
@@ -553,6 +547,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 		break;
 	}
 
+	/* Translate the address width setting into actual number */
+	switch (hw_feat->dma_width) {
+	case 0:
+		hw_feat->dma_width = 32;
+		break;
+	case 1:
+		hw_feat->dma_width = 40;
+		break;
+	case 2:
+		hw_feat->dma_width = 48;
+		break;
+	default:
+		hw_feat->dma_width = 32;
+	}
+
 	/* The Queue, Channel and TC counts are zero based so increment them
 	 * to get the actual number
 	 */
@@ -609,6 +618,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 	}
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int i;
+	int ret;
+
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+			       netdev->name, pdata);
+	if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     pdata->dev_irq);
+		return ret;
+	}
+
+	if (!pdata->per_channel_irq)
+		return 0;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		snprintf(channel->dma_irq_name,
+			 sizeof(channel->dma_irq_name) - 1,
+			 "%s-TxRx-%u", netdev_name(netdev),
+			 channel->queue_index);
+
+		ret = devm_request_irq(pdata->dev, channel->dma_irq,
+				       xgbe_dma_isr, 0,
+				       channel->dma_irq_name, channel);
+		if (ret) {
+			netdev_alert(netdev, "error requesting irq %d\n",
+				     channel->dma_irq);
+			goto err_irq;
+		}
+	}
+
+	return 0;
+
+err_irq:
+	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+	for (i--, channel--; i < pdata->channel_count; i--, channel--)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	if (!pdata->per_channel_irq)
+		return;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
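The err_irq unwind loop in xgbe_request_irqs() above uses unsigned wraparound as its termination test: after freeing index 0, i-- wraps to UINT_MAX, which fails the i < pdata->channel_count condition and ends the loop. The same idiom in isolation, as plain C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 4;
		unsigned int i;

		/* visits 3, 2, 1, 0; after 0, i-- wraps to UINT_MAX and
		 * the i < count condition goes false */
		for (i = count - 1; i < count; i--)
			printf("unwinding index %u\n", i);
		return 0;
	}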
@@ -630,6 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 	DBGPR("-->xgbe_init_rx_coalesce\n");
 
 	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
 	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
 
 	hw_if->config_rx_coalesce(pdata);
@@ -810,20 +882,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 		return -EINVAL;
 	}
 
-	phy_stop(pdata->phydev);
-
 	spin_lock_irqsave(&pdata->lock, flags);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_detach(netdev);
 
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 0);
 
-	/* Powerdown Tx/Rx */
 	hw_if->powerdown_tx(pdata);
 	hw_if->powerdown_rx(pdata);
 
+	xgbe_napi_disable(pdata, 0);
+
+	phy_stop(pdata->phydev);
+
 	pdata->power_down = 1;
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +926,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
 	phy_start(pdata->phydev);
 
-	/* Enable Tx/Rx */
+	xgbe_napi_enable(pdata, 0);
+
 	hw_if->powerup_tx(pdata);
 	hw_if->powerup_rx(pdata);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_attach(netdev);
 
-	xgbe_napi_enable(pdata, 0);
 	netif_tx_start_all_queues(netdev);
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,26 +947,39 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct net_device *netdev = pdata->netdev;
+	int ret;
 
 	DBGPR("-->xgbe_start\n");
 
-	xgbe_set_rx_mode(netdev);
-
 	hw_if->init(pdata);
 
 	phy_start(pdata->phydev);
 
+	xgbe_napi_enable(pdata, 1);
+
+	ret = xgbe_request_irqs(pdata);
+	if (ret)
+		goto err_napi;
+
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
 	xgbe_init_tx_timers(pdata);
 
-	xgbe_napi_enable(pdata, 1);
 	netif_tx_start_all_queues(netdev);
 
 	DBGPR("<--xgbe_start\n");
 
 	return 0;
+
+err_napi:
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
+	return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +992,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_stop\n");
 
-	phy_stop(pdata->phydev);
-
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 1);
 
 	xgbe_stop_tx_timers(pdata);
 
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	xgbe_free_irqs(pdata);
+
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
@@ -931,10 +1021,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_channel *channel;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int i;
-
 	DBGPR("-->xgbe_restart_dev\n");
 
 	/* If not running, "restart" will happen on open */
@@ -942,19 +1028,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->dev_irq);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			synchronize_irq(channel->dma_irq);
-	}
 
 	xgbe_free_tx_data(pdata);
 	xgbe_free_rx_data(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	xgbe_start(pdata);
 
 	DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1360,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel = NULL;
-	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1329,55 +1403,14 @@ static int xgbe_open(struct net_device *netdev)
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-			       netdev->name, pdata);
-	if (ret) {
-		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->dev_irq);
-		goto err_rings;
-	}
-
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++) {
-			snprintf(channel->dma_irq_name,
-				 sizeof(channel->dma_irq_name) - 1,
-				 "%s-TxRx-%u", netdev_name(netdev),
-				 channel->queue_index);
-
-			ret = devm_request_irq(pdata->dev, channel->dma_irq,
-					       xgbe_dma_isr, 0,
-					       channel->dma_irq_name, channel);
-			if (ret) {
-				netdev_alert(netdev,
-					     "error requesting irq %d\n",
-					     channel->dma_irq);
-				goto err_irq;
-			}
-		}
-	}
-
 	ret = xgbe_start(pdata);
 	if (ret)
-		goto err_start;
+		goto err_rings;
 
 	DBGPR("<--xgbe_open\n");
 
 	return 0;
 
-err_start:
-	hw_if->exit(pdata);
-
-err_irq:
-	if (pdata->per_channel_irq) {
-		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-		for (i--, channel--; i < pdata->channel_count; i--, channel--)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
 	desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1432,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel;
-	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
 	/* Stop the device */
 	xgbe_stop(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupts */
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
 	/* Free the channel and ring structures */
 	xgbe_free_channels(pdata);
 
@@ -1511,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int pr_mode, am_mode;
 
 	DBGPR("-->xgbe_set_rx_mode\n");
 
-	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
-	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
-
-	hw_if->set_promiscuous_mode(pdata, pr_mode);
-	hw_if->set_all_multicast_mode(pdata, am_mode);
-
-	hw_if->add_mac_addresses(pdata);
+	hw_if->config_rx_mode(pdata);
 
 	DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -1588,6 +1600,14 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
 	return 0;
 }
 
+static void xgbe_tx_timeout(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	netdev_warn(netdev, "tx timeout, device restarting\n");
+	schedule_work(&pdata->restart_work);
+}
+
 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
 						  struct rtnl_link_stats64 *s)
 {
@@ -1752,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= xgbe_ioctl,
 	.ndo_change_mtu		= xgbe_change_mtu,
+	.ndo_tx_timeout		= xgbe_tx_timeout,
 	.ndo_get_stats64	= xgbe_get_stats64,
 	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
@@ -1784,11 +1805,14 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 		if (desc_if->map_rx_buffer(pdata, ring, rdata))
 			break;
 
-		hw_if->rx_desc_reset(rdata);
+		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
 
 		ring->dirty++;
 	}
 
+	/* Make sure everything is written before the register write */
+	wmb();
+
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
 	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
@@ -1796,16 +1820,15 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
 				       struct xgbe_ring_data *rdata,
 				       unsigned int *len)
 {
-	struct net_device *netdev = pdata->netdev;
 	struct sk_buff *skb;
 	u8 *packet;
 	unsigned int copy_len;
 
-	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
@@ -1852,7 +1875,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		/* Make sure descriptor fields are read after reading the OWN
 		 * bit
 		 */
-		rmb();
+		dma_rmb();
 
 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
 		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
@@ -1975,7 +1998,7 @@ read_again:
 				      rdata->rx.hdr.dma_len,
 				      DMA_FROM_DEVICE);
 
-			skb = xgbe_create_skb(pdata, rdata, &put_len);
+			skb = xgbe_create_skb(napi, rdata, &put_len);
 			if (!skb) {
 				error = 1;
 				goto skip_data;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ebf489351555..5f149e8ee20f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
 		return -ENODEV;
 
 	ret = phy_ethtool_gset(pdata->phydev, cmd);
-	cmd->transceiver = XCVR_EXTERNAL;
 
 	DBGPR("<--xgbe_get_settings\n");
 
@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
 			     struct ethtool_coalesce *ec)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int riwt;
 
 	DBGPR("-->xgbe_get_coalesce\n");
 
 	memset(ec, 0, sizeof(struct ethtool_coalesce));
 
-	riwt = pdata->rx_riwt;
-	ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+	ec->rx_coalesce_usecs = pdata->rx_usecs;
 	ec->rx_max_coalesced_frames = pdata->rx_frames;
 
-	ec->tx_coalesce_usecs = pdata->tx_usecs;
 	ec->tx_max_coalesced_frames = pdata->tx_frames;
 
 	DBGPR("<--xgbe_get_coalesce\n");
@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	unsigned int rx_frames, rx_riwt, rx_usecs;
-	unsigned int tx_frames, tx_usecs;
+	unsigned int tx_frames;
 
 	DBGPR("-->xgbe_set_coalesce\n");
 
 	/* Check for not supported parameters */
 	if ((ec->rx_coalesce_usecs_irq) ||
 	    (ec->rx_max_coalesced_frames_irq) ||
+	    (ec->tx_coalesce_usecs) ||
 	    (ec->tx_coalesce_usecs_irq) ||
 	    (ec->tx_max_coalesced_frames_irq) ||
 	    (ec->stats_block_coalesce_usecs) ||
@@ -428,28 +424,18 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 	    (ec->rate_sample_interval))
 		return -EOPNOTSUPP;
 
-	/* Can only change rx-frames when interface is down (see
-	 * rx_descriptor_init in xgbe-dev.c)
-	 */
-	rx_frames = pdata->rx_frames;
-	if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
-		netdev_alert(netdev,
-			     "interface must be down to change rx-frames\n");
-		return -EINVAL;
-	}
-
 	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+	rx_usecs = ec->rx_coalesce_usecs;
 	rx_frames = ec->rx_max_coalesced_frames;
 
 	/* Use smallest possible value if conversion resulted in zero */
-	if (ec->rx_coalesce_usecs && !rx_riwt)
+	if (rx_usecs && !rx_riwt)
 		rx_riwt = 1;
 
 	/* Check the bounds of values for Rx */
 	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
-		rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
 		netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
-			     rx_usecs);
+			     hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
 		return -EINVAL;
 	}
 	if (rx_frames > pdata->rx_desc_count) {
@@ -458,7 +444,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 	}
 
-	tx_usecs = ec->tx_coalesce_usecs;
 	tx_frames = ec->tx_max_coalesced_frames;
 
 	/* Check the bounds of values for Tx */
@@ -469,10 +454,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 	}
 
 	pdata->rx_riwt = rx_riwt;
+	pdata->rx_usecs = rx_usecs;
 	pdata->rx_frames = rx_frames;
 	hw_if->config_rx_coalesce(pdata);
 
-	pdata->tx_usecs = tx_usecs;
 	pdata->tx_frames = tx_frames;
 	hw_if->config_tx_coalesce(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 32dd65137051..714905384900 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -374,15 +374,6 @@ static int xgbe_probe(struct platform_device *pdev)
 		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
 	}
 
-	/* Set the DMA mask */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
-	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-	if (ret) {
-		dev_err(dev, "dma_set_mask_and_coherent failed\n");
-		goto err_io;
-	}
-
 	/* Get the device interrupt */
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0) {
@@ -409,6 +400,16 @@ static int xgbe_probe(struct platform_device *pdev)
 	/* Set default configuration data */
 	xgbe_default_config(pdata);
 
+	/* Set the DMA mask */
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
+	ret = dma_set_mask_and_coherent(dev,
+					DMA_BIT_MASK(pdata->hw_feat.dma_width));
+	if (ret) {
+		dev_err(dev, "dma_set_mask_and_coherent failed\n");
+		goto err_io;
+	}
+
 	/* Calculate the number of Tx and Rx rings to be created
 	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
 	 *   the number of Tx queues to the number of Tx channels
@@ -490,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
+	/* Use default watchdog timeout */
+	netdev->watchdog_timeo = 0;
+
 	xgbe_init_rx_coalesce(pdata);
 	xgbe_init_tx_coalesce(pdata);
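Moving dma_set_mask_and_coherent() below xgbe_default_config() lets the probe size the DMA mask from the ADDR64 capability decoded out of MAC_HWF1R (32/40/48 bits) instead of hard-coding 40. The general pattern, sketched with the width as a parameter:

	#include <linux/dma-mapping.h>

	static int my_set_dma_mask(struct device *dev, unsigned int dma_width)
	{
		/* sets the streaming and coherent masks in one call;
		 * dma_width would come from capability registers */
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
	}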
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index f326178ef376..b03e4f58d02e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -179,7 +179,7 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
 	return 0;
 }
 
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
 {
 	struct xgbe_prv_data *pdata = container_of(info,
 						   struct xgbe_prv_data,
@@ -193,12 +193,13 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
 
 	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
 
-	*ts = ns_to_timespec(nsec);
+	*ts = ns_to_timespec64(nsec);
 
 	return 0;
 }
 
-static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+static int xgbe_settime(struct ptp_clock_info *info,
+			const struct timespec64 *ts)
 {
 	struct xgbe_prv_data *pdata = container_of(info,
 						   struct xgbe_prv_data,
@@ -206,7 +207,7 @@ static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
 	unsigned long flags;
 	u64 nsec;
 
-	nsec = timespec_to_ns(ts);
+	nsec = timespec64_to_ns(ts);
 
 	spin_lock_irqsave(&pdata->tstamp_lock, flags);
 
@@ -236,8 +237,8 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
 	info->max_adj = pdata->ptpclk_rate;
 	info->adjfreq = xgbe_adjfreq;
 	info->adjtime = xgbe_adjtime;
-	info->gettime = xgbe_gettime;
-	info->settime = xgbe_settime;
+	info->gettime64 = xgbe_gettime;
+	info->settime64 = xgbe_settime;
 	info->enable = xgbe_enable;
 
 	clock = ptp_clock_register(info, pdata->dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 13e8f95c077c..e62dfa2deab6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -222,7 +222,7 @@
 	 ((_idx) & ((_ring)->rdesc_count - 1)))
 
 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS		50
+#define XGMAC_INIT_DMA_TX_USECS		1000
 #define XGMAC_INIT_DMA_TX_FRAMES	25
 
 #define XGMAC_MAX_DMA_RIWT		0xff
@@ -325,8 +325,6 @@ struct xgbe_ring_data {
 	struct xgbe_tx_ring_data tx;	/* Tx-related data */
 	struct xgbe_rx_ring_data rx;	/* Rx-related data */
 
-	unsigned int interrupt;		/* Interrupt indicator */
-
 	unsigned int mapped_as_page;
 
 	/* Incomplete receive save location.  If the budget is exhausted
@@ -410,7 +408,7 @@ struct xgbe_channel {
 	unsigned int saved_ier;
 
 	unsigned int tx_timer_active;
-	struct hrtimer tx_timer;
+	struct timer_list tx_timer;
 
 	struct xgbe_ring *tx_ring;
 	struct xgbe_ring *rx_ring;
@@ -497,10 +495,8 @@ struct xgbe_mmc_stats {
 struct xgbe_hw_if {
 	int (*tx_complete)(struct xgbe_ring_desc *);
 
-	int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
-	int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-	int (*add_mac_addresses)(struct xgbe_prv_data *);
 	int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+	int (*config_rx_mode)(struct xgbe_prv_data *);
 
 	int (*enable_rx_csum)(struct xgbe_prv_data *);
 	int (*disable_rx_csum)(struct xgbe_prv_data *);
@@ -536,8 +532,9 @@ struct xgbe_hw_if {
 	int (*dev_read)(struct xgbe_channel *);
 	void (*tx_desc_init)(struct xgbe_channel *);
 	void (*rx_desc_init)(struct xgbe_channel *);
-	void (*rx_desc_reset)(struct xgbe_ring_data *);
 	void (*tx_desc_reset)(struct xgbe_ring_data *);
+	void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+			      unsigned int);
 	int (*is_last_desc)(struct xgbe_ring_desc *);
 	int (*is_context_desc)(struct xgbe_ring_desc *);
 	void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
@@ -620,7 +617,7 @@ struct xgbe_hw_features {
 	unsigned int mgk;		/* PMT magic packet */
 	unsigned int mmc;		/* RMON module */
 	unsigned int aoe;		/* ARP Offload */
-	unsigned int ts;		/* IEEE 1588-2008 Adavanced Timestamp */
+	unsigned int ts;		/* IEEE 1588-2008 Advanced Timestamp */
 	unsigned int eee;		/* Energy Efficient Ethernet */
 	unsigned int tx_coe;		/* Tx Checksum Offload */
 	unsigned int rx_coe;		/* Rx Checksum Offload */
@@ -632,6 +629,7 @@ struct xgbe_hw_features {
 	unsigned int rx_fifo_size;	/* MTL Receive FIFO Size */
 	unsigned int tx_fifo_size;	/* MTL Transmit FIFO Size */
 	unsigned int adv_ts_hi;		/* Advance Timestamping High Word */
+	unsigned int dma_width;		/* DMA width */
 	unsigned int dcb;		/* DCB Feature */
 	unsigned int sph;		/* Split Header Feature */
 	unsigned int tso;		/* TCP Segmentation Offload */
@@ -715,6 +713,7 @@ struct xgbe_prv_data {
 
 	/* Rx coalescing settings */
 	unsigned int rx_riwt;
+	unsigned int rx_usecs;
 	unsigned int rx_frames;
 
 	/* Current Rx buffer size */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97fcf781..b927021c6c40 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	if (!xgene_ring_mgr_init(pdata))
 		return -ENODEV;
 
-	if (!efi_enabled(EFI_BOOT)) {
+	if (pdata->clk) {
 		clk_prepare_enable(pdata->clk);
 		clk_disable_unprepare(pdata->clk);
 		clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ec45f3256f0e..d9bc89d69266 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -97,6 +97,8 @@ enum xgene_enet_rm {
 #define QCOHERENT		BIT(4)
 #define RECOMBBUF		BIT(27)
 
+#define MAC_OFFSET		0x30
+
 #define BLOCK_ETH_CSR_OFFSET		0x2000
 #define BLOCK_ETH_RING_IF_OFFSET	0x9000
 #define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b210c85..40d3530d7f30 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -428,13 +428,23 @@
 static int xgene_enet_register_irq(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct device *dev = ndev_to_dev(ndev);
+	struct xgene_enet_desc_ring *ring;
 	int ret;
 
-	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
-			       IRQF_SHARED, ndev->name, pdata->rx_ring);
-	if (ret) {
-		netdev_err(ndev, "rx%d interrupt request failed\n",
-			   pdata->rx_ring->irq);
+	ring = pdata->rx_ring;
+	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+			       IRQF_SHARED, ring->irq_name, ring);
+	if (ret)
+		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
+
+	if (pdata->cq_cnt) {
+		ring = pdata->tx_ring->cp_ring;
+		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+				       IRQF_SHARED, ring->irq_name, ring);
+		if (ret) {
+			netdev_err(ndev, "Failed to request irq %s\n",
+				   ring->irq_name);
+		}
 	}
 
 	return ret;
@@ -448,6 +458,37 @@ static void xgene_enet_free_irq(struct net_device *ndev)
 	pdata = netdev_priv(ndev);
 	dev = ndev_to_dev(ndev);
 	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+
+	if (pdata->cq_cnt) {
+		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
+			      pdata->tx_ring->cp_ring);
+	}
+}
+
+static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+
+	napi = &pdata->rx_ring->napi;
+	napi_enable(napi);
+
+	if (pdata->cq_cnt) {
+		napi = &pdata->tx_ring->cp_ring->napi;
+		napi_enable(napi);
+	}
+}
+
+static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+
+	napi = &pdata->rx_ring->napi;
+	napi_disable(napi);
+
+	if (pdata->cq_cnt) {
+		napi = &pdata->tx_ring->cp_ring->napi;
+		napi_disable(napi);
+	}
 }
 
 static int xgene_enet_open(struct net_device *ndev)
@@ -462,7 +503,7 @@ static int xgene_enet_open(struct net_device *ndev)
 	ret = xgene_enet_register_irq(ndev);
 	if (ret)
 		return ret;
-	napi_enable(&pdata->rx_ring->napi);
+	xgene_enet_napi_enable(pdata);
 
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_start(pdata->phy_dev);
@@ -486,7 +527,7 @@ static int xgene_enet_close(struct net_device *ndev)
 	else
 		cancel_delayed_work_sync(&pdata->link_work);
 
-	napi_disable(&pdata->rx_ring->napi);
+	xgene_enet_napi_disable(pdata);
 	xgene_enet_free_irq(ndev);
 	xgene_enet_process_ring(pdata->rx_ring, -1);
 
@@ -580,6 +621,8 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 	if (ring) {
 		if (ring->cp_ring && ring->cp_ring->cp_skb)
 			devm_kfree(dev, ring->cp_ring->cp_skb);
+		if (ring->cp_ring && pdata->cq_cnt)
+			xgene_enet_free_desc_ring(ring->cp_ring);
 		xgene_enet_free_desc_ring(ring);
 	}
 
@@ -645,9 +688,11 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
-	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
-	u8 bp_bufnum = START_BP_BUFNUM;
-	u16 ring_id, ring_num = START_RING_NUM;
+	u8 cpu_bufnum = pdata->cpu_bufnum;
+	u8 eth_bufnum = pdata->eth_bufnum;
+	u8 bp_bufnum = pdata->bp_bufnum;
+	u16 ring_num = pdata->ring_num;
+	u16 ring_id;
 	int ret;
 
 	/* allocate rx descriptor ring */
@@ -671,6 +716,12 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	rx_ring->nbufpool = NUM_BUFPOOL;
 	rx_ring->buf_pool = buf_pool;
 	rx_ring->irq = pdata->rx_irq;
+	if (!pdata->cq_cnt) {
+		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+			 ndev->name);
+	} else {
+		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
+	}
 
 	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
 					sizeof(struct sk_buff *),
 					GFP_KERNEL);
 	if (!buf_pool->rx_skb) {
@@ -692,7 +743,22 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	}
 
 	pdata->tx_ring = tx_ring;
 
-	cp_ring = pdata->rx_ring;
+	if (!pdata->cq_cnt) {
+		cp_ring = pdata->rx_ring;
+	} else {
+		/* allocate tx completion descriptor ring */
+		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+						      RING_CFGSIZE_16KB,
+						      ring_id);
+		if (!cp_ring) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		cp_ring->irq = pdata->txc_irq;
+		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
+	}
+
 	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
 				       sizeof(struct sk_buff *), GFP_KERNEL);
 	if (!cp_ring->cp_skb) {
@@ -752,6 +818,22 @@ static const struct net_device_ops xgene_ndev_ops = {
 	.ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
+static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+{
+	u32 id = 0;
+	int ret;
+
+	ret = device_property_read_u32(dev, "port-id", &id);
+	if (!ret && id > 1) {
+		dev_err(dev, "Incorrect port-id specified\n");
+		return -ENODEV;
+	}
+
+	pdata->port_id = id;
+
+	return 0;
+}
+
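xgene_get_port_id() goes through device_property_read_u32(), which resolves the named property from either devicetree or ACPI, so a single probe path serves both firmware interfaces. The usual call pattern with a default value (the property name here is hypothetical):

	#include <linux/property.h>

	u32 val = 0;	/* default if the property is absent */

	/* returns 0 on success and leaves val untouched on failure */
	if (device_property_read_u32(dev, "example-prop", &val))
		dev_dbg(dev, "example-prop not given, defaulting to %u\n", val);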
 static int xgene_get_mac_address(struct device *dev,
 				 unsigned char *addr)
 {
@@ -835,13 +917,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 		return -ENOMEM;
 	}
 
-	ret = platform_get_irq(pdev, 0);
-	if (ret <= 0) {
-		dev_err(dev, "Unable to get ENET Rx IRQ\n");
-		ret = ret ? : -ENXIO;
+	ret = xgene_get_port_id(dev, pdata);
+	if (ret)
 		return ret;
-	}
-	pdata->rx_irq = ret;
 
 	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
 		eth_hw_addr_random(ndev);
@@ -860,19 +938,37 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 		return -ENODEV;
 	}
 
+	ret = platform_get_irq(pdev, 0);
+	if (ret <= 0) {
+		dev_err(dev, "Unable to get ENET Rx IRQ\n");
+		ret = ret ? : -ENXIO;
+		return ret;
+	}
+	pdata->rx_irq = ret;
+
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
+		ret = platform_get_irq(pdev, 1);
+		if (ret <= 0) {
+			dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
+			ret = ret ? : -ENXIO;
+			return ret;
+		}
+		pdata->txc_irq = ret;
+	}
+
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pdata->clk)) {
 		/* Firmware may have set up the clock already. */
 		pdata->clk = NULL;
 	}
 
-	base_addr = pdata->base_addr;
+	base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
 	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
-		pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
 		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
 	} else {
 		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
@@ -928,13 +1024,60 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
 		pdata->mac_ops = &xgene_sgmac_ops;
 		pdata->port_ops = &xgene_sgport_ops;
 		pdata->rm = RM1;
+		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
 		break;
 	default:
 		pdata->mac_ops = &xgene_xgmac_ops;
 		pdata->port_ops = &xgene_xgport_ops;
 		pdata->rm = RM0;
+		pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
 		break;
 	}
+
+	switch (pdata->port_id) {
+	case 0:
+		pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+		pdata->eth_bufnum = START_ETH_BUFNUM_0;
+		pdata->bp_bufnum = START_BP_BUFNUM_0;
+		pdata->ring_num = START_RING_NUM_0;
+		break;
+	case 1:
+		pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+		pdata->eth_bufnum = START_ETH_BUFNUM_1;
+		pdata->bp_bufnum = START_BP_BUFNUM_1;
+		pdata->ring_num = START_RING_NUM_1;
+		break;
+	default:
+		break;
+	}
+
+}
+
+static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+
+	napi = &pdata->rx_ring->napi;
+	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+
+	if (pdata->cq_cnt) {
+		napi = &pdata->tx_ring->cp_ring->napi;
+		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+			       NAPI_POLL_WEIGHT);
+	}
+}
+
+static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
+{
+	struct napi_struct *napi;
+
+	napi = &pdata->rx_ring->napi;
+	netif_napi_del(napi);
+
+	if (pdata->cq_cnt) {
+		napi = &pdata->tx_ring->cp_ring->napi;
+		netif_napi_del(napi);
+	}
 }
 
 static int xgene_enet_probe(struct platform_device *pdev)
@@ -942,7 +1085,6 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	struct xgene_enet_pdata *pdata;
 	struct device *dev = &pdev->dev;
-	struct napi_struct *napi;
 	struct xgene_mac_ops *mac_ops;
 	int ret;
 
@@ -984,8 +1126,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	napi = &pdata->rx_ring->napi;
-	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+	xgene_enet_napi_add(pdata);
 	mac_ops = pdata->mac_ops;
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		ret = xgene_enet_mdio_config(pdata);
@@ -1012,7 +1153,7 @@ static int xgene_enet_remove(struct platform_device *pdev)
 	mac_ops->rx_disable(pdata);
 	mac_ops->tx_disable(pdata);
 
-	netif_napi_del(&pdata->rx_ring->napi);
+	xgene_enet_napi_del(pdata);
 	xgene_enet_mdio_remove(pdata);
 	xgene_enet_delete_desc_rings(pdata);
 	unregister_netdev(ndev);
@@ -1025,14 +1166,18 @@ static int xgene_enet_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgene_enet_acpi_match[] = {
 	{ "APMC0D05", },
+	{ "APMC0D30", },
+	{ "APMC0D31", },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
-static struct of_device_id xgene_enet_of_match[] = {
+static const struct of_device_id xgene_enet_of_match[] = {
 	{.compatible = "apm,xgene-enet",},
+	{.compatible = "apm,xgene1-sgenet",},
+	{.compatible = "apm,xgene1-xgenet",},
 	{},
 };
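The port-id plumbing above derives the shared register base from the probed port's own base: pdata->base_addr points at this port's MAC block, so subtracting port_id * MAC_OFFSET recovers the port-0 base that the common CSR offsets are defined against, while mcx_mac_addr keeps using the per-port base. A sketch of the address math under that reading:

	/* illustrative, mirroring xgene_enet_get_resources() above */
	void __iomem *port_base = pdata->base_addr;	/* this port's MAC */
	void __iomem *shared = port_base - (pdata->port_id * MAC_OFFSET);
	void __iomem *eth_csr = shared + BLOCK_ETH_CSR_OFFSET;	/* 0x2000 */
	void __iomem *mac = port_base + BLOCK_ETH_MAC_OFFSET;	/* per port */

(The SGMII CSRs in xgene_enet_sgmac.c, by contrast, add port_id * 4, one 32-bit register per port.)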
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index c2d465c3db66..8f3d232b09bc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,9 +41,18 @@
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
-#define START_ETH_BUFNUM	2
-#define START_BP_BUFNUM		0x22
-#define START_RING_NUM		8
+
+#define START_CPU_BUFNUM_0	0
+#define START_ETH_BUFNUM_0	2
+#define START_BP_BUFNUM_0	0x22
+#define START_RING_NUM_0	8
+#define START_CPU_BUFNUM_1	12
+#define START_ETH_BUFNUM_1	10
+#define START_BP_BUFNUM_1	0x2A
+#define START_RING_NUM_1	264
+
+#define IRQ_ID_SIZE		16
+#define XGENE_MAX_TXC_RINGS	1
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
@@ -57,6 +66,7 @@ struct xgene_enet_desc_ring {
 	u16 tail;
 	u16 slots;
 	u16 irq;
+	char irq_name[IRQ_ID_SIZE];
 	u32 size;
 	u32 state[NUM_RING_CONFIG];
 	void __iomem *cmd_base;
@@ -111,6 +121,8 @@ struct xgene_enet_pdata {
 	u32 cp_qcnt_hi;
 	u32 cp_qcnt_low;
 	u32 rx_irq;
+	u32 txc_irq;
+	u8 cq_cnt;
 	void __iomem *eth_csr_addr;
 	void __iomem *eth_ring_if_addr;
 	void __iomem *eth_diag_csr_addr;
@@ -125,6 +137,11 @@ struct xgene_enet_pdata {
 	struct xgene_mac_ops *mac_ops;
 	struct xgene_port_ops *port_ops;
 	struct delayed_work link_work;
+	u32 port_id;
+	u8 cpu_bufnum;
+	u8 eth_bufnum;
+	u8 bp_bufnum;
+	u16 ring_num;
 };
 
 struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f5d4f68c288c..f27fb6f2a93b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -226,6 +226,7 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
 static void xgene_sgmac_init(struct xgene_enet_pdata *p)
 {
 	u32 data, loop = 10;
+	u32 offset = p->port_id * 4;
 
 	xgene_sgmac_reset(p);
 
@@ -272,9 +273,9 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
 	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
 
 	/* Bypass traffic gating */
-	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
 	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
-	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
+	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
 }
 
 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -330,13 +331,14 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
 				  u32 dst_ring_num, u16 bufpool_id)
 {
 	u32 data, fpsel;
+	u32 offset = p->port_id * MAC_OFFSET;
 
 	data = CFG_CLE_BYPASS_EN0;
-	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);
+	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
 
 	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
 	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
-	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
+	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index daae0e016253..a65d7a60f116 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -483,8 +483,8 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
 	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
 	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
*/ - st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ - st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ /* free some skb's */ for (i=0; i<N_RX_RING; i++) { if (bp->rx_bufs[i] != NULL) { @@ -699,8 +699,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) while (1) { cp = &bp->rx_cmds[i]; - stat = ld_le16(&cp->xfer_status); - residual = ld_le16(&cp->res_count); + stat = le16_to_cpu(cp->xfer_status); + residual = le16_to_cpu(cp->res_count); if ((stat & ACTIVE) == 0) break; nb = RX_BUFLEN - residual - 2; @@ -728,8 +728,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) skb_reserve(bp->rx_bufs[i], 2); } bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); - st_le16(&cp->res_count, 0); - st_le16(&cp->xfer_status, 0); + cp->res_count = cpu_to_le16(0); + cp->xfer_status = cpu_to_le16(0); last = i; if (++i >= N_RX_RING) i = 0; } @@ -769,7 +769,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) while (1) { cp = &bp->tx_cmds[bp->tx_empty]; - stat = ld_le16(&cp->xfer_status); + stat = le16_to_cpu(cp->xfer_status); if (txintcount < 10) { XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); } @@ -1411,8 +1411,8 @@ static int bmac_close(struct net_device *dev) bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ /* disable rx and tx dma */ - st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ - st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ /* free some skb's */ XXDEBUG(("bmac: free rx bufs\n")); @@ -1493,7 +1493,7 @@ static void bmac_tx_timeout(unsigned long data) cp = &bp->tx_cmds[bp->tx_empty]; /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ -/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */ +/* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ /* mb->pr, mb->xmtfs, mb->fifofc)); */ /* turn off both tx and rx and reset the chip */ @@ -1506,7 +1506,7 @@ static void bmac_tx_timeout(unsigned long data) bmac_enable_and_reset_chip(dev); /* restart rx dma */ - cp = bus_to_virt(ld_le32(&rd->cmdptr)); + cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); out_le16(&cp->xfer_status, 0); out_le32(&rd->cmdptr, virt_to_bus(cp)); @@ -1553,10 +1553,10 @@ static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) ip = (int*)(cp+i); printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", - ld_le32(ip+0), - ld_le32(ip+1), - ld_le32(ip+2), - ld_le32(ip+3)); + le32_to_cpup(ip+0), + le32_to_cpup(ip+1), + le32_to_cpup(ip+2), + le32_to_cpup(ip+3)); } } @@ -1621,7 +1621,7 @@ static int bmac_remove(struct macio_dev *mdev) return 0; } -static struct of_device_id bmac_match[] = +static const struct of_device_id bmac_match[] = { { .name = "bmac", diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 842fe7684904..e58a7c73766e 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -310,7 +310,7 @@ static void dbdma_reset(volatile struct dbdma_regs __iomem *dma) * way on some machines. 
*/ for (i = 200; i > 0; --i) - if (ld_le32(&dma->control) & RUN) + if (le32_to_cpu(dma->control) & RUN) udelay(1); } @@ -452,21 +452,21 @@ static int mace_open(struct net_device *dev) data = skb->data; } mp->rx_bufs[i] = skb; - st_le16(&cp->req_count, RX_BUFLEN); - st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); - st_le32(&cp->phy_addr, virt_to_bus(data)); + cp->req_count = cpu_to_le16(RX_BUFLEN); + cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS); + cp->phy_addr = cpu_to_le32(virt_to_bus(data)); cp->xfer_status = 0; ++cp; } mp->rx_bufs[i] = NULL; - st_le16(&cp->command, DBDMA_STOP); + cp->command = cpu_to_le16(DBDMA_STOP); mp->rx_fill = i; mp->rx_empty = 0; /* Put a branch back to the beginning of the receive command list */ ++cp; - st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); - st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds)); + cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); + cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); /* start rx dma */ out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ @@ -475,8 +475,8 @@ static int mace_open(struct net_device *dev) /* put a branch at the end of the tx command list */ cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; - st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); - st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds)); + cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); + cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); /* reset tx dma */ out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); @@ -507,8 +507,8 @@ static int mace_close(struct net_device *dev) out_8(&mb->imr, 0xff); /* disable all intrs */ /* disable rx and tx dma */ - st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ - st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ + rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ + td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ mace_clean_rings(mp); @@ -558,8 +558,8 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) } mp->tx_bufs[fill] = skb; cp = mp->tx_cmds + NCMDS_TX * fill; - st_le16(&cp->req_count, len); - st_le32(&cp->phy_addr, virt_to_bus(skb->data)); + cp->req_count = cpu_to_le16(len); + cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data)); np = mp->tx_cmds + NCMDS_TX * next; out_le16(&np->command, DBDMA_STOP); @@ -691,7 +691,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) out_8(&mb->xmtfc, AUTO_PAD_XMIT); continue; } - dstat = ld_le32(&td->status); + dstat = le32_to_cpu(td->status); /* stop DMA controller */ out_le32(&td->control, RUN << 16); /* @@ -720,11 +720,11 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) mace_reset(dev); /* * XXX mace likes to hang the machine after a xmtfs error. 
- * This is hard to reproduce, reseting *may* help + * This is hard to reproduce, resetting *may* help */ } cp = mp->tx_cmds + NCMDS_TX * i; - stat = ld_le16(&cp->xfer_status); + stat = le16_to_cpu(cp->xfer_status); if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) { /* * Check whether there were in fact 2 bytes written to @@ -830,7 +830,7 @@ static void mace_tx_timeout(unsigned long data) mace_reset(dev); /* restart rx dma */ - cp = bus_to_virt(ld_le32(&rd->cmdptr)); + cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); dbdma_reset(rd); out_le16(&cp->xfer_status, 0); out_le32(&rd->cmdptr, virt_to_bus(cp)); @@ -889,20 +889,20 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) spin_lock_irqsave(&mp->lock, flags); for (i = mp->rx_empty; i != mp->rx_fill; ) { cp = mp->rx_cmds + i; - stat = ld_le16(&cp->xfer_status); + stat = le16_to_cpu(cp->xfer_status); if ((stat & ACTIVE) == 0) { next = i + 1; if (next >= N_RX_RING) next = 0; np = mp->rx_cmds + next; if (next != mp->rx_fill && - (ld_le16(&np->xfer_status) & ACTIVE) != 0) { + (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) { printk(KERN_DEBUG "mace: lost a status word\n"); ++mace_lost_status; } else break; } - nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count); + nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count); out_le16(&cp->command, DBDMA_STOP); /* got a packet, have a look at it */ skb = mp->rx_bufs[i]; @@ -962,13 +962,13 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) mp->rx_bufs[i] = skb; } } - st_le16(&cp->req_count, RX_BUFLEN); + cp->req_count = cpu_to_le16(RX_BUFLEN); data = skb? skb->data: dummy_buf; - st_le32(&cp->phy_addr, virt_to_bus(data)); + cp->phy_addr = cpu_to_le32(virt_to_bus(data)); out_le16(&cp->xfer_status, 0); out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); #if 0 - if ((ld_le32(&rd->status) & ACTIVE) != 0) { + if ((le32_to_cpu(rd->status) & ACTIVE) != 0) { out_le32(&rd->control, (PAUSE << 16) | PAUSE); while ((in_le32(&rd->status) & ACTIVE) != 0) ; @@ -984,7 +984,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct of_device_id mace_match[] = +static const struct of_device_id mace_match[] = { { .name = "mace", diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 6e66127e6abf..89914ca17a49 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -575,7 +575,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) mace_reset(dev); /* * XXX mace likes to hang the machine after a xmtfs error. - * This is hard to reproduce, reseting *may* help + * This is hard to reproduce, resetting *may* help */ } /* dma should have finished */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 52fdfe225978..a8b80c56ac25 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) /* * atl1c_read_phy_core - * core funtion to read register in PHY via MDIO control regsiter. + * core function to read register in PHY via MDIO control regsiter. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to read @@ -356,7 +356,7 @@ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, /* * atl1c_write_phy_core - * core funtion to write to register in PHY via MDIO control regsiter. 
+ * core function to write to register in PHY via MDIO control register. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to write diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 587f63e87588..932bd1862f7a 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -752,7 +752,7 @@ static void atl1c_patch_assign(struct atl1c_hw *hw) if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && hw->revision_id == L2CB_V21) { - /* config acess mode */ + /* config access mode */ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, REG_PCIE_DEV_MISC_CTRL); pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 41a3c9804427..a6f9142b9048 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -71,12 +71,12 @@ config BCMGENET Broadcom BCM7xxx Set Top Box family chipset. config BNX2 - tristate "QLogic NetXtremeII support" + tristate "QLogic bnx2 support" depends on PCI select CRC32 select FW_LOADER ---help--- - This driver supports QLogic NetXtremeII gigabit Ethernet cards. + This driver supports QLogic bnx2 gigabit Ethernet cards. To compile this driver as a module, choose M here: the module will be called bnx2. This is recommended. @@ -87,8 +87,8 @@ config CNIC select BNX2 select UIO ---help--- - This driver supports offload features of QLogic NetXtremeII - gigabit Ethernet cards. + This driver supports offload features of QLogic bnx2 gigabit + Ethernet cards. To compile this driver as a module, choose M here: the module will be called cnic. This is recommended. @@ -142,7 +142,7 @@ config BNX2X_SRIOV config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX + depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) select PHYLIB ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. 
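The bmac/mace hunks above swap the PowerPC-only st_le32()/ld_le16() helpers, which byte-swapped through a pointer, for plain assignments of cpu_to_le32()/le16_to_cpu() values; the descriptor fields are ordinary memory, so a direct store of a converted value is equivalent. A minimal user-space sketch of the pattern (struct dbdma_cmd_demo and the *_demo helpers are illustrative stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a DBDMA descriptor with little-endian fields. */
struct dbdma_cmd_demo {
	uint16_t req_count;
	uint16_t xfer_status;
};

static inline uint16_t cpu_to_le16_demo(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v >> 8) | (v << 8));	/* swap on big-endian hosts */
#else
	return v;				/* no-op on little-endian hosts */
#endif
}

#define le16_to_cpu_demo(v) cpu_to_le16_demo(v)	/* the swap is its own inverse */

int main(void)
{
	struct dbdma_cmd_demo cmd;

	/* Old style stored through a pointer: st_le16(&cmd.req_count, 64);
	 * new style, as in the hunks above, assigns a converted value directly. */
	cmd.req_count = cpu_to_le16_demo(64);
	cmd.xfer_status = cpu_to_le16_demo(0);

	printf("req_count in CPU order: %u\n", le16_to_cpu_demo(cmd.req_count));
	return 0;
}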
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index bd5916a60cb5..77363d680532 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -400,7 +400,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) } #ifdef CONFIG_BCM47XX -#include <bcm47xx_nvram.h> +#include <linux/bcm47xx_nvram.h> static void b44_wap54g10_workaround(struct b44 *bp) { char buf[20]; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; - int tx_work_done, rx_work_done; + int rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; @@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ - tx_work_done = bcm_enet_tx_reclaim(dev, 0); + bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); - if (rx_work_done >= budget || tx_work_done > 0) { - /* rx/tx queue is not yet empty/clean */ + if (rx_work_done >= budget) { + /* rx queue is not yet empty/clean */ return rx_work_done; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5b308a4a4d0e..783543ad1fcf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), - STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) s = &bcm_sysport_gstrings_stats[i]; switch (s->type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_SOFT: continue; case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index fc19417d82a5..7e3d87a88c76 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -570,6 +570,7 @@ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_RUNT, BCM_SYSPORT_STAT_RXCHK, BCM_SYSPORT_STAT_RBUF, + BCM_SYSPORT_STAT_SOFT, }; /* Macros to help define ethtool statistics */ @@ -590,6 +591,7 @@ enum bcm_sysport_stat_type { #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) +#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) #define STAT_RXCHK(str, m, ofs) { \ .stat_string = str, \ diff --git a/drivers/net/ethernet/broadcom/bgmac.c 
b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..de77d3a74abc 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -14,9 +14,10 @@ #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> -#include <bcm47xx_nvram.h> +#include <linux/bcm47xx_nvram.h> static const struct bcma_device_id bgmac_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), @@ -114,54 +115,89 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac, bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); } +static void +bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + int i, int len, u32 ctl0) +{ + struct bgmac_slot_info *slot; + struct bgmac_dma_desc *dma_desc; + u32 ctl1; + + if (i == BGMAC_TX_RING_SLOTS - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + + ctl1 = len & BGMAC_DESC_CTL1_LEN; + + slot = &ring->slots[i]; + dma_desc = &ring->cpu_base[i]; + dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); +} + static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) { struct device *dma_dev = bgmac->core->dma_dev; struct net_device *net_dev = bgmac->net_dev; - struct bgmac_dma_desc *dma_desc; - struct bgmac_slot_info *slot; - u32 ctl0, ctl1; - int free_slots; + int index = ring->end % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + int nr_frags; + u32 flags; + int i; if (skb->len > BGMAC_DESC_CTL1_LEN) { bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); - goto err_stop_drop; + goto err_drop; } - if (ring->start <= ring->end) - free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS; - else - free_slots = ring->start - ring->end; - if (free_slots == 1) { + if (skb->ip_summed == CHECKSUM_PARTIAL) + skb_checksum_help(skb); + + nr_frags = skb_shinfo(skb)->nr_frags; + + /* ring->end - ring->start will return the number of valid slots, + * even when ring->end overflows + */ + if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); netif_stop_queue(net_dev); return NETDEV_TX_BUSY; } - slot = &ring->slots[ring->end]; - slot->skb = skb; - slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len, + slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, slot->dma_addr)) { - bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", - ring->mmio_base); - goto err_stop_drop; - } + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma_head; - ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF; - if (ring->end == ring->num_slots - 1) - ctl0 |= BGMAC_DESC_CTL0_EOT; - ctl1 = skb->len & BGMAC_DESC_CTL1_LEN; + flags = BGMAC_DESC_CTL0_SOF; + if (!nr_frags) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; - dma_desc = ring->cpu_base; - dma_desc += ring->end; - dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); - dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); - dma_desc->ctl0 = cpu_to_le32(ctl0); - dma_desc->ctl1 = cpu_to_le32(ctl1); + bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags); + flags = 0; + + for (i = 0; i < nr_frags; i++) { + struct 
skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + + index = (index + 1) % BGMAC_TX_RING_SLOTS; + slot = &ring->slots[index]; + slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0, + len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma; + + if (i == nr_frags - 1) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; + + bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags); + } + slot->skb = skb; + ring->end += nr_frags + 1; netdev_sent_queue(net_dev, skb->len); wmb(); @@ -169,20 +205,34 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, /* Increase ring->end to point empty slot. We tell hardware the first * slot it should *not* read. */ - if (++ring->end >= BGMAC_TX_RING_SLOTS) - ring->end = 0; bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, ring->index_base + - ring->end * sizeof(struct bgmac_dma_desc)); + (ring->end % BGMAC_TX_RING_SLOTS) * + sizeof(struct bgmac_dma_desc)); - /* Always keep one slot free to allow detecting bugged calls. */ - if (--free_slots == 1) + if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8) netif_stop_queue(net_dev); return NETDEV_TX_OK; -err_stop_drop: - netif_stop_queue(net_dev); +err_dma: + dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), + DMA_TO_DEVICE); + + while (i > 0) { + int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); + int len = ctl1 & BGMAC_DESC_CTL1_LEN; + + dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); + } + +err_dma_head: + bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); + +err_drop: dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -202,34 +252,45 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) empty_slot &= BGMAC_DMA_TX_STATDPTR; empty_slot /= sizeof(struct bgmac_dma_desc); - while (ring->start != empty_slot) { - struct bgmac_slot_info *slot = &ring->slots[ring->start]; + while (ring->start != ring->end) { + int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[slot_idx]; + u32 ctl1; + int len; - if (slot->skb) { + if (slot_idx == empty_slot) + break; + + ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); + len = ctl1 & BGMAC_DESC_CTL1_LEN; + if (ctl1 & BGMAC_DESC_CTL0_SOF) /* Unmap no longer used buffer */ - dma_unmap_single(dma_dev, slot->dma_addr, - slot->skb->len, DMA_TO_DEVICE); - slot->dma_addr = 0; + dma_unmap_single(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + if (slot->skb) { bytes_compl += slot->skb->len; pkts_compl++; /* Free memory! :) */ dev_kfree_skb(slot->skb); slot->skb = NULL; - } else { - bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! 
End of ring: %d\n", - ring->start, ring->end); } - if (++ring->start >= BGMAC_TX_RING_SLOTS) - ring->start = 0; + slot->dma_addr = 0; + ring->start++; freed = true; } + if (!pkts_compl) + return; + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); - if (freed && netif_queue_stopped(bgmac->net_dev)) + if (netif_queue_stopped(bgmac->net_dev)) netif_wake_queue(bgmac->net_dev); } @@ -275,46 +336,53 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { struct device *dma_dev = bgmac->core->dma_dev; - struct sk_buff *skb; dma_addr_t dma_addr; struct bgmac_rx_header *rx; + void *buf; /* Alloc skb */ - skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); - if (!skb) + buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE); + if (!buf) return -ENOMEM; /* Poison - if everything goes fine, hardware will overwrite it */ - rx = (struct bgmac_rx_header *)skb->data; + rx = buf + BGMAC_RX_BUF_OFFSET; rx->len = cpu_to_le16(0xdead); rx->flags = cpu_to_le16(0xbeef); /* Map skb for the DMA */ - dma_addr = dma_map_single(dma_dev, skb->data, + dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { bgmac_err(bgmac, "DMA mapping error\n"); - dev_kfree_skb(skb); + put_page(virt_to_head_page(buf)); return -ENOMEM; } /* Update the slot */ - slot->skb = skb; + slot->buf = buf; slot->dma_addr = dma_addr; - if (slot->dma_addr & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); - return 0; } +static void bgmac_dma_rx_update_index(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + dma_wmb(); + + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, + ring->index_base + + ring->end * sizeof(struct bgmac_dma_desc)); +} + static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac, struct bgmac_dma_ring *ring, int desc_idx) { struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx; u32 ctl0 = 0, ctl1 = 0; - if (desc_idx == ring->num_slots - 1) + if (desc_idx == BGMAC_RX_RING_SLOTS - 1) ctl0 |= BGMAC_DESC_CTL0_EOT; ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; /* Is there any BGMAC device that requires extension? 
*/ @@ -326,6 +394,21 @@ static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac, dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); dma_desc->ctl0 = cpu_to_le32(ctl0); dma_desc->ctl1 = cpu_to_le32(ctl1); + + ring->end = desc_idx; +} + +static void bgmac_dma_rx_poison_buf(struct device *dma_dev, + struct bgmac_slot_info *slot) +{ + struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; + + dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); } static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, @@ -340,70 +423,62 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, end_slot &= BGMAC_DMA_RX_STATDPTR; end_slot /= sizeof(struct bgmac_dma_desc); - ring->end = end_slot; - - while (ring->start != ring->end) { + while (ring->start != end_slot) { struct device *dma_dev = bgmac->core->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; - struct sk_buff *skb = slot->skb; - struct bgmac_rx_header *rx; + struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; + struct sk_buff *skb; + void *buf = slot->buf; + dma_addr_t dma_addr = slot->dma_addr; u16 len, flags; - /* Unmap buffer to make it accessible to the CPU */ - dma_sync_single_for_cpu(dma_dev, slot->dma_addr, - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + do { + /* Prepare new skb as replacement */ + if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) { + bgmac_dma_rx_poison_buf(dma_dev, slot); + break; + } - /* Get info from the header */ - rx = (struct bgmac_rx_header *)skb->data; - len = le16_to_cpu(rx->len); - flags = le16_to_cpu(rx->flags); + /* Unmap buffer to make it accessible to the CPU */ + dma_unmap_single(dma_dev, dma_addr, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); - do { - dma_addr_t old_dma_addr = slot->dma_addr; - int err; + /* Get info from the header */ + len = le16_to_cpu(rx->len); + flags = le16_to_cpu(rx->flags); /* Check for poison and drop or pass the packet */ if (len == 0xdead && flags == 0xbeef) { bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", ring->start); - dma_sync_single_for_device(dma_dev, - slot->dma_addr, - BGMAC_RX_BUF_SIZE, - DMA_FROM_DEVICE); + put_page(virt_to_head_page(buf)); break; } - /* Omit CRC. */ - len -= ETH_FCS_LEN; - - /* Prepare new skb as replacement */ - err = bgmac_dma_rx_skb_for_slot(bgmac, slot); - if (err) { - /* Poison the old skb */ - rx->len = cpu_to_le16(0xdead); - rx->flags = cpu_to_le16(0xbeef); - - dma_sync_single_for_device(dma_dev, - slot->dma_addr, - BGMAC_RX_BUF_SIZE, - DMA_FROM_DEVICE); + if (len > BGMAC_RX_ALLOC_SIZE) { + bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", + ring->start); + put_page(virt_to_head_page(buf)); break; } - bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); - /* Unmap old skb, we'll pass it to the netfif */ - dma_unmap_single(dma_dev, old_dma_addr, - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + /* Omit CRC. 
*/ + len -= ETH_FCS_LEN; - skb_put(skb, BGMAC_RX_FRAME_OFFSET + len); - skb_pull(skb, BGMAC_RX_FRAME_OFFSET); + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); + skb_put(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET + len); + skb_pull(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET); skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); - netif_receive_skb(skb); + napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); + if (++ring->start >= BGMAC_RX_RING_SLOTS) ring->start = 0; @@ -411,6 +486,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, break; } + bgmac_dma_rx_update_index(bgmac, ring); + return handled; } @@ -436,40 +513,90 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac, return false; } -static void bgmac_dma_ring_free(struct bgmac *bgmac, - struct bgmac_dma_ring *ring) +static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) { struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_dma_desc *dma_desc = ring->cpu_base; struct bgmac_slot_info *slot; - int size; int i; - for (i = 0; i < ring->num_slots; i++) { + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { + int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + slot = &ring->slots[i]; - if (slot->skb) { - if (slot->dma_addr) - dma_unmap_single(dma_dev, slot->dma_addr, - slot->skb->len, DMA_TO_DEVICE); - dev_kfree_skb(slot->skb); - } + dev_kfree_skb(slot->skb); + + if (!slot->dma_addr) + continue; + + if (slot->skb) + dma_unmap_single(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); } +} + +static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot; + int i; - if (ring->cpu_base) { - /* Free ring of descriptors */ - size = ring->num_slots * sizeof(struct bgmac_dma_desc); - dma_free_coherent(dma_dev, size, ring->cpu_base, - ring->dma_base); + for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) { + slot = &ring->slots[i]; + if (!slot->dma_addr) + continue; + + dma_unmap_single(dma_dev, slot->dma_addr, + BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(slot->buf)); + slot->dma_addr = 0; } } +static void bgmac_dma_ring_desc_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + int num_slots) +{ + struct device *dma_dev = bgmac->core->dma_dev; + int size; + + if (!ring->cpu_base) + return; + + /* Free ring of descriptors */ + size = num_slots * sizeof(struct bgmac_dma_desc); + dma_free_coherent(dma_dev, size, ring->cpu_base, + ring->dma_base); +} + +static void bgmac_dma_cleanup(struct bgmac *bgmac) +{ + int i; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]); + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]); +} + static void bgmac_dma_free(struct bgmac *bgmac) { int i; for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) - bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]); + bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i], + BGMAC_TX_RING_SLOTS); + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) - bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]); + bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i], + BGMAC_RX_RING_SLOTS); } static int bgmac_dma_alloc(struct bgmac *bgmac) @@ -492,11 +619,10 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { ring = &bgmac->tx_ring[i]; 
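/* A self-contained sketch (user-space stand-in, not kernel code) of the
 * free-running index scheme the reworked bgmac TX path above relies on:
 * ring->start and ring->end become u32 counters that only ever increment,
 * so "end - start" is the occupancy even across u32 wraparound (unsigned
 * subtraction is modulo 2^32), and the slot index is recovered with
 * "% BGMAC_TX_RING_SLOTS", exact because the slot count (128) is a power
 * of two. */
#include <assert.h>
#include <stdint.h>

#define DEMO_RING_SLOTS 128u	/* power of two, like BGMAC_TX_RING_SLOTS */

static inline uint32_t demo_ring_occupancy(uint32_t start, uint32_t end)
{
	return end - start;	/* still correct after either counter wraps */
}

static inline uint32_t demo_ring_slot(uint32_t counter)
{
	return counter % DEMO_RING_SLOTS;
}

int main(void)
{
	uint32_t start = 0xFFFFFFF0u;	/* counter just below u32 wraparound */
	uint32_t end = start + 0x20;	/* queue 32 descriptors; end wraps to 0x10 */

	assert(demo_ring_occupancy(start, end) == 0x20);
	assert(demo_ring_slot(end) == 0x10 % DEMO_RING_SLOTS);
	return 0;
}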
- ring->num_slots = BGMAC_TX_RING_SLOTS; ring->mmio_base = ring_base[i]; /* Alloc ring of descriptors */ - size = ring->num_slots * sizeof(struct bgmac_dma_desc); + size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); ring->cpu_base = dma_zalloc_coherent(dma_dev, size, &ring->dma_base, GFP_KERNEL); @@ -505,8 +631,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) ring->mmio_base); goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX); @@ -519,14 +643,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) } for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { - int j; - ring = &bgmac->rx_ring[i]; - ring->num_slots = BGMAC_RX_RING_SLOTS; ring->mmio_base = ring_base[i]; /* Alloc ring of descriptors */ - size = ring->num_slots * sizeof(struct bgmac_dma_desc); + size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); ring->cpu_base = dma_zalloc_coherent(dma_dev, size, &ring->dma_base, GFP_KERNEL); @@ -536,8 +657,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) err = -ENOMEM; goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX); @@ -545,15 +664,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) ring->index_base = lower_32_bits(ring->dma_base); else ring->index_base = 0; - - /* Alloc RX slots */ - for (j = 0; j < ring->num_slots; j++) { - err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); - if (err) { - bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n"); - goto err_dma_free; - } - } } return 0; @@ -563,10 +673,10 @@ err_dma_free: return -ENOMEM; } -static void bgmac_dma_init(struct bgmac *bgmac) +static int bgmac_dma_init(struct bgmac *bgmac) { struct bgmac_dma_ring *ring; - int i; + int i, err; for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { ring = &bgmac->tx_ring[i]; @@ -598,16 +708,24 @@ static void bgmac_dma_init(struct bgmac *bgmac) if (ring->unaligned) bgmac_dma_rx_enable(bgmac, ring); - for (j = 0; j < ring->num_slots; j++) - bgmac_dma_rx_setup_desc(bgmac, ring, j); - - bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, - ring->index_base + - ring->num_slots * sizeof(struct bgmac_dma_desc)); - ring->start = 0; ring->end = 0; + for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) { + err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); + if (err) + goto error; + + bgmac_dma_rx_setup_desc(bgmac, ring, j); + } + + bgmac_dma_rx_update_index(bgmac, ring); } + + return 0; + +error: + bgmac_dma_cleanup(bgmac); + return err; } /************************************************** @@ -1015,8 +1133,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac) bgmac_phy_init(bgmac); netdev_reset_queue(bgmac->net_dev); - - bgmac->int_status = 0; } static void bgmac_chip_intrs_on(struct bgmac *bgmac) @@ -1085,11 +1201,8 @@ static void bgmac_enable(struct bgmac *bgmac) } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */ -static void bgmac_chip_init(struct bgmac *bgmac, bool full_init) +static void bgmac_chip_init(struct bgmac *bgmac) { - struct bgmac_dma_ring *ring; - int i; - /* 1 interrupt per received frame */ bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT); @@ -1107,16 +1220,7 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init) bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN); - if (full_init) { - 
bgmac_dma_init(bgmac); - if (1) /* FIXME: is there any case we don't want IRQs? */ - bgmac_chip_intrs_on(bgmac); - } else { - for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { - ring = &bgmac->rx_ring[i]; - bgmac_dma_rx_enable(bgmac, ring); - } - } + bgmac_chip_intrs_on(bgmac); bgmac_enable(bgmac); } @@ -1131,14 +1235,13 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id) if (!int_status) return IRQ_NONE; - /* Ack */ - bgmac_write(bgmac, BGMAC_INT_STATUS, int_status); + int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX); + if (int_status) + bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status); /* Disable new interrupts until handling existing ones */ bgmac_chip_intrs_off(bgmac); - bgmac->int_status = int_status; - napi_schedule(&bgmac->napi); return IRQ_HANDLED; @@ -1147,25 +1250,17 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id) static int bgmac_poll(struct napi_struct *napi, int weight) { struct bgmac *bgmac = container_of(napi, struct bgmac, napi); - struct bgmac_dma_ring *ring; int handled = 0; - if (bgmac->int_status & BGMAC_IS_TX0) { - ring = &bgmac->tx_ring[0]; - bgmac_dma_tx_free(bgmac, ring); - bgmac->int_status &= ~BGMAC_IS_TX0; - } + /* Ack */ + bgmac_write(bgmac, BGMAC_INT_STATUS, ~0); - if (bgmac->int_status & BGMAC_IS_RX) { - ring = &bgmac->rx_ring[0]; - handled += bgmac_dma_rx_read(bgmac, ring, weight); - bgmac->int_status &= ~BGMAC_IS_RX; - } + bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]); + handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight); - if (bgmac->int_status) { - bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status); - bgmac->int_status = 0; - } + /* Poll again if more events arrived in the meantime */ + if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) + return handled; if (handled < weight) { napi_complete(napi); @@ -1185,23 +1280,27 @@ static int bgmac_open(struct net_device *net_dev) int err = 0; bgmac_chip_reset(bgmac); + + err = bgmac_dma_init(bgmac); + if (err) + return err; + /* Specs say about reclaiming rings here, but we do that in DMA init */ - bgmac_chip_init(bgmac, true); + bgmac_chip_init(bgmac); err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, KBUILD_MODNAME, net_dev); if (err < 0) { bgmac_err(bgmac, "IRQ request error: %d!\n", err); - goto err_out; + bgmac_dma_cleanup(bgmac); + return err; } napi_enable(&bgmac->napi); phy_start(bgmac->phy_dev); netif_carrier_on(net_dev); - -err_out: - return err; + return 0; } static int bgmac_stop(struct net_device *net_dev) @@ -1217,6 +1316,7 @@ static int bgmac_stop(struct net_device *net_dev) free_irq(bgmac->core->irq, net_dev); bgmac_chip_reset(bgmac); + bgmac_dma_cleanup(bgmac); return 0; } @@ -1337,13 +1437,46 @@ static void bgmac_adjust_link(struct net_device *net_dev) } } +static int bgmac_fixed_phy_register(struct bgmac *bgmac) +{ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; + struct phy_device *phy_dev; + int err; + + phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phy_dev || IS_ERR(phy_dev)) { + bgmac_err(bgmac, "Failed to register fixed PHY device\n"); + return -ENODEV; + } + + err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { + bgmac_err(bgmac, "Connecting PHY failed\n"); + return err; + } + + bgmac->phy_dev = phy_dev; + + return err; +} + static int bgmac_mii_register(struct bgmac *bgmac) { + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; struct mii_bus *mii_bus; struct phy_device 
*phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; int i, err = 0; + if (ci->id == BCMA_CHIP_ID_BCM4707 || + ci->id == BCMA_CHIP_ID_BCM53018) + return bgmac_fixed_phy_register(bgmac); + mii_bus = mdiobus_alloc(); if (!mii_bus) return -ENOMEM; @@ -1524,6 +1657,10 @@ static int bgmac_probe(struct bcma_device *core) goto err_dma_free; } + net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + net_dev->hw_features = net_dev->features; + net_dev->vlan_features = net_dev->features; + err = register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 89fa5bc69c51..db27febbb215 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -345,8 +345,8 @@ #define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */ #define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */ -#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */ -#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */ +#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */ +#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */ #define BGMAC_DESC_CTL1_LEN 0x00001FFF #define BGMAC_PHY_NOREGS 0x1E @@ -356,12 +356,16 @@ #define BGMAC_MAX_RX_RINGS 1 #define BGMAC_TX_RING_SLOTS 128 -#define BGMAC_RX_RING_SLOTS 512 - 1 /* Why -1? Well, Broadcom does that... */ +#define BGMAC_RX_RING_SLOTS 512 #define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */ #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ +#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \ + BGMAC_RX_FRAME_OFFSET) #define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */ #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) +#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */ #define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */ @@ -383,7 +387,10 @@ #define ETHER_MAX_LEN 1518 struct bgmac_slot_info { - struct sk_buff *skb; + union { + struct sk_buff *skb; + void *buf; + }; dma_addr_t dma_addr; }; @@ -409,14 +416,13 @@ enum bgmac_dma_ring_type { * empty. */ struct bgmac_dma_ring { - u16 num_slots; - u16 start; - u16 end; + u32 start; + u32 end; - u16 mmio_base; struct bgmac_dma_desc *cpu_base; dma_addr_t dma_base; u32 index_base; /* Used for unaligned rings only, otherwise 0 */ + u16 mmio_base; bool unaligned; struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; @@ -447,7 +453,6 @@ struct bgmac { /* Int */ u32 int_mask; - u32 int_status; /* Current MAC state */ int mac_speed; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 02bf0b86995b..2b66ef3d8217 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,7 +1,7 @@ -/* bnx2.c: QLogic NX2 network driver. +/* bnx2.c: QLogic bnx2 network driver. 
* * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.5" -#define DRV_MODULE_RELDATE "December 20, 2013" +#define DRV_MODULE_VERSION "2.2.6" +#define DRV_MODULE_RELDATE "January 29, 2014" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" @@ -72,10 +72,10 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); -MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver"); +MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_MIPS_FILE_06); @@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp) bp->idle_chk_status_idx = 0xffff; - bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; - /* Set up how to generate a link change interrupt. */ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); @@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) return 0; } -static netdev_features_t -bnx2_fix_features(struct net_device *dev, netdev_features_t features) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_CTAG_RX; - - return features; -} - static int bnx2_set_features(struct net_device *dev, netdev_features_t features) { @@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, - .ndo_fix_features = bnx2_fix_features, .ndo_set_features = bnx2_set_features, .ndo_tx_timeout = bnx2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features; dev->priv_flags |= IFF_UNICAST_FLT; + if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if ((rc = register_netdev(dev))) { dev_err(&pdev->dev, "Cannot register net device\n"); goto error; diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 28df35d35893..f92f76c44756 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -1,7 +1,7 @@ -/* bnx2.h: QLogic NX2 network driver. +/* bnx2.h: QLogic bnx2 network driver. * * Copyright (c) 2004-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h index 7db79c28b5ff..b0f2ccadaffd 100644 --- a/drivers/net/ethernet/broadcom/bnx2_fw.h +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h @@ -1,7 +1,7 @@ -/* bnx2_fw.h: QLogic NX2 network driver. 
+/* bnx2_fw.h: QLogic bnx2 network driver. * * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 756053c028be..355d5fea5be9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -531,20 +531,8 @@ struct bnx2x_fastpath { struct napi_struct napi; #ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define BNX2X_FP_STATE_IDLE 0 -#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ -#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ -#define BNX2X_FP_STATE_DISABLED (1 << 2) -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ -#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) -#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) -#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) - /* protect state */ - spinlock_t lock; -#endif /* CONFIG_NET_RX_BUSY_POLL */ + unsigned long busy_poll_state; +#endif union host_hc_status_block status_blk; /* chip independent shortcuts into sb structure */ @@ -619,104 +607,83 @@ struct bnx2x_fastpath { #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) #ifdef CONFIG_NET_RX_BUSY_POLL -static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) + +enum bnx2x_fp_state { + BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ + + BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ + BNX2X_STATE_FP_NAPI_REQ = BIT(1), + + BNX2X_STATE_FP_POLL_BIT = 2, + BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ + + BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ +}; + +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) { - spin_lock_init(&fp->lock); - fp->state = BNX2X_FP_STATE_IDLE; + WRITE_ONCE(fp->busy_poll_state, 0); } /* called from the device poll routine to get ownership of a FP */ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) { - bool rc = true; - - spin_lock_bh(&fp->lock); - if (fp->state & BNX2X_FP_LOCKED) { - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); - fp->state |= BNX2X_FP_STATE_NAPI_YIELD; - rc = false; - } else { - /* we don't care if someone yielded */ - fp->state = BNX2X_FP_STATE_NAPI; + unsigned long prev, old = READ_ONCE(fp->busy_poll_state); + + while (1) { + switch (old) { + case BNX2X_STATE_FP_POLL: + /* make sure bnx2x_fp_lock_poll() wont starve us */ + set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, + &fp->busy_poll_state); + /* fallthrough */ + case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: + return false; + default: + break; + } + prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); + if (unlikely(prev != old)) { + old = prev; + continue; + } + return true; } - spin_unlock_bh(&fp->lock); - return rc; } -/* returns true is someone tried to get the FP while napi had it */ -static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { - bool rc = false; - - spin_lock_bh(&fp->lock); - WARN_ON(fp->state & - 
(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); - - if (fp->state & BNX2X_FP_STATE_POLL_YIELD) - rc = true; - - /* state ==> idle, unless currently disabled */ - fp->state &= BNX2X_FP_STATE_DISABLED; - spin_unlock_bh(&fp->lock); - return rc; + smp_wmb(); + fp->busy_poll_state = 0; } /* called from bnx2x_low_latency_poll() */ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) { - bool rc = true; - - spin_lock_bh(&fp->lock); - if ((fp->state & BNX2X_FP_LOCKED)) { - fp->state |= BNX2X_FP_STATE_POLL_YIELD; - rc = false; - } else { - /* preserve yield marks */ - fp->state |= BNX2X_FP_STATE_POLL; - } - spin_unlock_bh(&fp->lock); - return rc; + return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; } -/* returns true if someone tried to get the FP while it was locked */ -static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) { - bool rc = false; - - spin_lock_bh(&fp->lock); - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); - - if (fp->state & BNX2X_FP_STATE_POLL_YIELD) - rc = true; - - /* state ==> idle, unless currently disabled */ - fp->state &= BNX2X_FP_STATE_DISABLED; - spin_unlock_bh(&fp->lock); - return rc; + smp_mb__before_atomic(); + clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); } -/* true if a socket is polling, even if it did not get the lock */ +/* true if a socket is polling */ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { - WARN_ON(!(fp->state & BNX2X_FP_OWNED)); - return fp->state & BNX2X_FP_USER_PEND; + return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; } /* false if fp is currently owned */ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) { - int rc = true; - - spin_lock_bh(&fp->lock); - if (fp->state & BNX2X_FP_OWNED) - rc = false; - fp->state |= BNX2X_FP_STATE_DISABLED; - spin_unlock_bh(&fp->lock); + set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); + return !bnx2x_fp_ll_polling(fp); - return rc; } #else -static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) { } @@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) return true; } -static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { - return false; } static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) @@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) return false; } -static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) { - return false; } static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) @@ -1811,7 +1776,7 @@ struct bnx2x { int stats_state; /* used for synchronization of concurrent threads statistics handling */ - spinlock_t stats_lock; + struct mutex stats_lock; /* used by dmae command loader */ struct dmae_command stats_dmae; @@ -1935,8 +1900,6 @@ struct bnx2x { int fp_array_size; u32 dump_preset_idx; - bool stats_started; - struct semaphore stats_sema; u8 phys_port_id[ETH_ALEN]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 0a9faa134a9a..2f63467bce46 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) int i; 
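/* A user-space C11-atomics sketch, an illustration rather than the kernel's
 * implementation, of the busy_poll_state scheme above: the per-queue
 * spinlock is replaced by a single word raced over with cmpxchg.  Busy-poll
 * may take the queue only when it is idle; NAPI either takes it or leaves a
 * "requested" bit so busy-poll cannot starve the softirq. */
#include <stdatomic.h>
#include <stdbool.h>

enum {
	DEMO_FP_NAPI     = 1u << 0,	/* NAPI handler owns the queue */
	DEMO_FP_NAPI_REQ = 1u << 1,	/* NAPI would like the queue */
	DEMO_FP_POLL     = 1u << 2,	/* busy-poll owns the queue */
};

/* busy-poll side: succeed only if the queue is completely idle */
static bool demo_lock_poll(_Atomic unsigned int *state)
{
	unsigned int expected = 0;

	return atomic_compare_exchange_strong(state, &expected, DEMO_FP_POLL);
}

/* NAPI side: take the queue, or record the request if busy-poll owns it */
static bool demo_lock_napi(_Atomic unsigned int *state)
{
	unsigned int old = atomic_load(state);

	for (;;) {
		if (old & DEMO_FP_POLL) {
			atomic_fetch_or(state, DEMO_FP_NAPI_REQ);
			return false;	/* retry from the next softirq */
		}
		if (atomic_compare_exchange_strong(state, &old, DEMO_FP_NAPI))
			return true;
		/* CAS failed: old now holds the fresh value; loop again */
	}
}

int main(void)
{
	_Atomic unsigned int state = 0;

	if (demo_lock_poll(&state))		/* busy-poll grabs the idle queue */
		(void)demo_lock_napi(&state);	/* NAPI fails, leaves the REQ bit */
	return 0;
}
/* The matching unlocks would store 0 (NAPI) or clear DEMO_FP_POLL with
 * release ordering, mirroring bnx2x_fp_unlock_napi()/bnx2x_fp_unlock_poll(). */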
for_each_rx_queue_cnic(bp, i) { - bnx2x_fp_init_lock(&bp->fp[i]); + bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp) int i; for_each_eth_queue(bp, i) { - bnx2x_fp_init_lock(&bp->fp[i]); + bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) } } + bnx2x_fp_unlock_napi(fp); + /* Fall out from the NAPI loop if needed */ - if (!bnx2x_fp_unlock_napi(fp) && - !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* No need to update SB for FCoE L2 ring as long as * it's connected to the default SB and the SB diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ffe4e003e636..e3d853cab7c9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) } packet = skb_put(skb, pkt_size); memcpy(packet, bp->dev->dev_addr, ETH_ALEN); - memset(packet + ETH_ALEN, 0, ETH_ALEN); + eth_zero_addr(packet + ETH_ALEN); memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); for (i = ETH_HLEN; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 583591d52497..058bc7328220 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -521,6 +521,17 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ */ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + /* Set non-default values for TXFIR in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000 + #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20 + + /* Set non-default values for IPREDRIVER in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000 + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24 + + /* Set non-default values for POST2 in SFP mode. 
*/ + #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000 + #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28 u32 reserved0[5]; /* 0x17c */ @@ -2247,8 +2258,8 @@ struct shmem2_region { #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 u32 reserved5[2]; - u32 reserved6[PORT_MAX]; - + u32 link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */ + #define LINK_CHANGE_COUNT_MASK 0xff /* Offset 0x168 */ /* driver version for each personality */ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index bd90e50bd8e6..d6e1975b7b69 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -278,7 +278,7 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, } -/* congestion managment port init api description +/* congestion management port init api description * the api works as follows: * the driver should pass the cmng_init_input struct, the port_init function * will prepare the required internal ram structure which will be passed back diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 778e4cd32571..21a0d6afca4a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -195,6 +195,10 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, #define MAX_PACKET_SIZE (9700) #define MAX_KR_LINK_RETRY 4 +#define DEFAULT_TX_DRV_BRDCT 2 +#define DEFAULT_TX_DRV_IFIR 0 +#define DEFAULT_TX_DRV_POST2 3 +#define DEFAULT_TX_DRV_IPRE_DRIVER 6 /**********************************************************/ /* INTERFACE */ @@ -563,7 +567,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( * Will return the NIG ETS registers to init values.Except * credit_upper_bound. * That isn't used in this configuration (No WFQ is enabled) and will be -* configured acording to spec +* configured according to spec *. ******************************************************************************/ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, @@ -680,7 +684,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( * Will return the PBF ETS registers to init values.Except * credit_upper_bound. * That isn't used in this configuration (No WFQ is enabled) and will be -* configured acording to spec +* configured according to spec *. ******************************************************************************/ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) @@ -738,7 +742,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) } /****************************************************************************** * Description: -* E3B0 disable will return basicly the values to init values. +* E3B0 disable will return basically the values to init values. *. ******************************************************************************/ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, @@ -761,7 +765,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, /****************************************************************************** * Description: -* Disable will return basicly the values to init values. +* Disable will return basically the values to init values. 
* ******************************************************************************/ int bnx2x_ets_disabled(struct link_params *params, @@ -2938,7 +2942,7 @@ static int bnx2x_eee_initial_config(struct link_params *params, { vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; - /* Propogate params' bits --> vars (for migration exposure) */ + /* Propagate params' bits --> vars (for migration exposure) */ if (params->eee_mode & EEE_MODE_ENABLE_LPI) vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; else @@ -3595,10 +3599,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, * init configuration, and set/clear SGMII flag. Internal * phy init is done purely in phy_init stage. */ -#define WC_TX_DRIVER(post2, idriver, ipre) \ +#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ - (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)) + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \ + (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET)) #define WC_TX_FIR(post, main, pre) \ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ @@ -3765,12 +3770,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - WC_TX_DRIVER(0x02, 0x06, 0x09)); + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); /* Configure the next lane if dual mode */ if (phy->flags & FLAGS_WC_DUAL_MODE) bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), - WC_TX_DRIVER(0x02, 0x06, 0x09)); + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0); @@ -3933,6 +3938,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 misc1_val, tap_val, tx_driver_val, lane, val; u32 cfg_tap_val, tx_drv_brdct, tx_equal; + u32 ifir_val, ipost2_val, ipre_driver_val; /* Hold rxSeqStart */ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, @@ -3978,7 +3984,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, if (is_xfi) { misc1_val |= 0x5; tap_val = WC_TX_FIR(0x08, 0x37, 0x00); - tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0); } else { cfg_tap_val = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. @@ -3987,10 +3993,6 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; - tx_drv_brdct = (cfg_tap_val & - PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> - PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; - misc1_val |= 0x9; /* TAP values are controlled by nvram, if value there isn't 0 */ @@ -3999,11 +4001,36 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, else tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); - if (tx_drv_brdct) - tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct, - 0x06); - else - tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06); + ifir_val = DEFAULT_TX_DRV_IFIR; + ipost2_val = DEFAULT_TX_DRV_POST2; + ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER; + tx_drv_brdct = DEFAULT_TX_DRV_BRDCT; + + /* If any of the IFIR/IPRE_DRIVER/POST2 is set, apply all + * configuration.
+ */ + if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK | + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK | + PORT_HW_CFG_TX_DRV_POST2_MASK)) { + ifir_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IFIR_MASK) >> + PORT_HW_CFG_TX_DRV_IFIR_SHIFT; + ipre_driver_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK) + >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT; + ipost2_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_POST2_MASK) >> + PORT_HW_CFG_TX_DRV_POST2_SHIFT; + } + + if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) { + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + } + + tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct, + ipre_driver_val, ifir_val); } bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); @@ -4144,7 +4171,7 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, MDIO_WC_REG_TX_FIR_TAP_ENABLE)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - WC_TX_DRIVER(0x02, 0x02, 0x02)); + WC_TX_DRIVER(0x02, 0x02, 0x02, 0)); } static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, @@ -6731,6 +6758,25 @@ static int bnx2x_update_link_up(struct link_params *params, msleep(20); return rc; } + +static void bnx2x_chng_link_count(struct link_params *params, bool clear) +{ + struct bnx2x *bp = params->bp; + u32 addr, val; + + /* Verify the link_change_count is supported by the MFW */ + if (!(SHMEM2_HAS(bp, link_change_count))) + return; + + addr = params->shmem2_base + + offsetof(struct shmem2_region, link_change_count[params->port]); + if (clear) + val = 0; + else + val = REG_RD(bp, addr) + 1; + REG_WR(bp, addr, val); +} + /* The bnx2x_link_update function should be called upon link * interrupt. * Link is considered up as follows: @@ -6749,6 +6795,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) struct link_vars phy_vars[MAX_PHYS]; u8 port = params->port; u8 link_10g_plus, phy_index; + u32 prev_link_status = vars->link_status; u8 ext_phy_link_up = 0, cur_link_up; int rc = 0; u8 is_mi_int = 0; @@ -6988,6 +7035,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) else rc = bnx2x_update_link_down(params, vars); + if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP) + bnx2x_chng_link_count(params, false); + /* Update MCP link status was changed */ if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX) bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); @@ -12631,6 +12681,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) params->link_flags = PHY_INITIALIZED; /* Driver opens NIG-BRB filters */ bnx2x_set_rx_filter(params, 1); + bnx2x_chng_link_count(params, true); /* Check if link flap can be avoided */ lfa_status = bnx2x_check_lfa(params); @@ -12705,6 +12756,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); /* Disable attentions */ vars->link_status = 0; + bnx2x_chng_link_count(params, true); bnx2x_update_mng(params, vars->link_status); vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | SHMEM_EEE_ACTIVE_BIT); @@ -13308,7 +13360,7 @@ static void bnx2x_check_over_curr(struct link_params *params, vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; } -/* Returns 0 if no change occured since last check; 1 otherwise. */ +/* Returns 0 if no change occurred since last check; 1 otherwise. 
*/ static u8 bnx2x_analyze_link_error(struct link_params *params, struct link_vars *vars, u32 status, u32 phy_flag, u32 link_flag, u8 notify) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..b9f85fccb419 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -129,8 +129,8 @@ struct bnx2x_mac_vals { u32 xmac_val; u32 emac_addr; u32 emac_val; - u32 umac_addr; - u32 umac_val; + u32 umac_addr[2]; + u32 umac_val[2]; u32 bmac_addr; u32 bmac_val[2]; }; @@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) return 0; } +/* previous driver DMAE transaction may have occurred when pre-boot stage ended + * and boot began, or when kdump kernel was loaded. Either case would invalidate + * the addresses of the transaction, resulting in was-error bit set in the pci + * causing all hw-to-host pcie transactions to timeout. If this happened we want + * to clear the interrupt which detected this from the pglueb and the was done + * bit + */ +static void bnx2x_clean_pglue_errors(struct bnx2x *bp) +{ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_ABS_FUNC(bp)); +} + static int bnx2x_init_hw_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); + bnx2x_clean_pglue_errors(bp); bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_block(bp, BLOCK_DMAE, init_phase); @@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) return base + (BP_ABS_FUNC(bp)) * stride; } +static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, + u8 port, u32 reset_reg, + struct bnx2x_mac_vals *vals) +{ + u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; + u32 base_addr; + + if (!(mask & reset_reg)) + return false; + + BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); + base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; + vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); + REG_WR(bp, vals->umac_addr[port], 0); + + return true; +} + static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, struct bnx2x_mac_vals *vals) { @@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, u8 port = BP_PORT(bp); /* reset addresses as they also mark which values were changed */ - vals->bmac_addr = 0; - vals->umac_addr = 0; - vals->xmac_addr = 0; - vals->emac_addr = 0; + memset(vals, 0, sizeof(*vals)); reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); @@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, REG_WR(bp, vals->xmac_addr, 0); mac_stopped = true; } - mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; - if (mask & reset_reg) { - BNX2X_DEV_INFO("Disable umac Rx\n"); - base_addr = BP_PORT(bp) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; - vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; - vals->umac_val = REG_RD(bp, vals->umac_addr); - REG_WR(bp, vals->umac_addr, 0); - mac_stopped = true; - } + + mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, + reset_reg, vals); + mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, + reset_reg, vals); } if (mac_stopped) @@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp, &mac_vals); - /* close LLH filters towards the BRB */ + /* close LLH filters for both ports towards the BRB */ + bnx2x_set_rx_filter(&bp->link_params, 0); + bp->link_params.port ^= 1; bnx2x_set_rx_filter(&bp->link_params, 0); + bp->link_params.port ^= 1; /* Check if the UNDI driver was previously loaded */ if (bnx2x_prev_is_after_undi(bp)) { @@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) if (mac_vals.xmac_addr) REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); - if (mac_vals.umac_addr) - REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); + if (mac_vals.umac_addr[0]) + REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); + if (mac_vals.umac_addr[1]) + REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); if (mac_vals.emac_addr) REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); if (mac_vals.bmac_addr) { @@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) return bnx2x_prev_mcp_done(bp); } -/* previous driver DMAE transaction may have occurred when pre-boot stage ended - * and boot began, or when kdump kernel was loaded. Either case would invalidate - * the addresses of the transaction, resulting in was-error bit set in the pci - * causing all hw-to-host pcie transactions to timeout. If this happened we want - * to clear the interrupt which detected this from the pglueb and the was done - * bit - */ -static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) -{ - if (!CHIP_IS_E1x(bp)) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - DP(BNX2X_MSG_SP, - "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, - 1 << BP_FUNC(bp)); - } - } -} - static int bnx2x_prev_unload(struct bnx2x *bp) { int time_counter = 10; @@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) /* clear hw from errors which may have resulted from an interrupted * dmae transaction. */ - bnx2x_prev_interrupted_dmae(bp); + bnx2x_clean_pglue_errors(bp); /* Release previously held locks */ hw_lock_reg = (BP_FUNC(bp) <= 5) ? @@ -11546,13 +11556,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) /* Disable iSCSI OOO if MAC configuration is invalid. */ if (!is_valid_ether_addr(iscsi_mac)) { bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; - memset(iscsi_mac, 0, ETH_ALEN); + eth_zero_addr(iscsi_mac); } /* Disable FCoE if MAC configuration is invalid. 
*/ if (!is_valid_ether_addr(fip_mac)) { bp->flags |= NO_FCOE_FLAG; - memset(bp->fip_mac, 0, ETH_ALEN); + eth_zero_addr(bp->fip_mac); } } @@ -11563,7 +11573,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) int port = BP_PORT(bp); /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(bp->dev->dev_addr); if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); @@ -11610,7 +11620,7 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) u32 cfg; if (IS_VF(bp)) - return 0; + return false; if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { /* Take function: tmp = func */ @@ -11650,6 +11660,13 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) u32 val = 0, val2 = 0; int rc = 0; + /* Validate that chip access is feasible */ + if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { + dev_err(&bp->pdev->dev, + "Chip read returns all Fs. Preventing probe from continuing\n"); + return -EINVAL; + } + bnx2x_get_common_hwinfo(bp); /* @@ -12037,9 +12054,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); mutex_init(&bp->drv_info_mutex); + mutex_init(&bp->stats_lock); bp->drv_info_mng_owner = false; - spin_lock_init(&bp->stats_lock); - sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12557,6 +12573,7 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { + features = vlan_features_check(skb, features); return vxlan_features_check(skb, features); } @@ -12722,6 +12739,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + /* AER (Advanced Error reporting) configuration */ rc = pci_enable_pcie_error_reporting(pdev); if (!rc) @@ -12766,7 +12786,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; - if (!CHIP_IS_E1x(bp)) { + if (!chip_is_e1x) { dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = @@ -13275,30 +13295,27 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); u64 ns; - u32 remainder; ns = timecounter_read(&bp->timecounter); DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); u64 ns; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); @@ -13330,8 +13347,8 @@ static void bnx2x_register_phc(struct bnx2x *bp) bp->ptp_clock_info.pps = 0; bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; - bp->ptp_clock_info.gettime = bnx2x_ptp_gettime; - 
bp->ptp_clock_info.settime = bnx2x_ptp_settime; + bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; + bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; bp->ptp_clock_info.enable = bnx2x_ptp_enable; bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); @@ -13665,9 +13682,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) cancel_delayed_work_sync(&bp->sp_task); cancel_delayed_work_sync(&bp->period_task); - spin_lock_bh(&bp->stats_lock); + mutex_lock(&bp->stats_lock); bp->stats_state = STATS_STATE_DISABLED; - spin_unlock_bh(&bp->stats_lock); + mutex_unlock(&bp->stats_lock); bnx2x_save_statistics(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 6fe547c93e74..49d511092c82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -29,7 +29,7 @@ #define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) /* [RW 1] Initiate the ATC array - reset all the valid bits */ #define ATC_REG_ATC_INIT_ARRAY 0x1100b8 -/* [R 1] ATC initalization done */ +/* [R 1] ATC initialization done */ #define ATC_REG_ATC_INIT_DONE 0x1100bc /* [RC 6] Interrupt register #0 read clear */ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 @@ -7341,6 +7341,8 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 #define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 #define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 +#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01 +#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 #define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2de1871..f67348d16966 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -592,7 +592,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), GFP_KERNEL); if (!mc) { - BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); + BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n"); return -ENOMEM; } } @@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) cookie.vf = vf; cookie.state = VF_ACQUIRED; - bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + if (rc) + goto op_err; } DP(BNX2X_MSG_IOV, "set state to acquired\n"); @@ -2693,7 +2695,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else /* function has not been loaded yet. 
Show mac as 0s */ - memset(&ivi->mac, 0, ETH_ALEN); + eth_zero_addr(ivi->mac); /* vlan */ if (bulletin->valid_bitmap & (1 << VLAN_VALID)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d1608297c773..266b055c2360 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp) */ static void bnx2x_storm_stats_post(struct bnx2x *bp) { - if (!bp->stats_pending) { - int rc; + int rc; - spin_lock_bh(&bp->stats_lock); - - if (bp->stats_pending) { - spin_unlock_bh(&bp->stats_lock); - return; - } - - bp->fw_stats_req->hdr.drv_stats_counter = - cpu_to_le16(bp->stats_counter++); + if (bp->stats_pending) + return; - DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", - le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); + bp->fw_stats_req->hdr.drv_stats_counter = + cpu_to_le16(bp->stats_counter++); - /* adjust the ramrod to include VF queues statistics */ - bnx2x_iov_adjust_stats_req(bp); - bnx2x_dp_stats(bp); + DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", + le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); - /* send FW stats ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, - U64_HI(bp->fw_stats_req_mapping), - U64_LO(bp->fw_stats_req_mapping), - NONE_CONNECTION_TYPE); - if (rc == 0) - bp->stats_pending = 1; + /* adjust the ramrod to include VF queues statistics */ + bnx2x_iov_adjust_stats_req(bp); + bnx2x_dp_stats(bp); - spin_unlock_bh(&bp->stats_lock); - } + /* send FW stats ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping), + NONE_CONNECTION_TYPE); + if (rc == 0) + bp->stats_pending = 1; } static void bnx2x_hw_stats_post(struct bnx2x *bp) @@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp) */ /* should be called under stats_sema */ -static void __bnx2x_stats_pmf_update(struct bnx2x *bp) +static void bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) } /* should be called under stats_sema */ -static void __bnx2x_stats_start(struct bnx2x *bp) +static void bnx2x_stats_start(struct bnx2x *bp) { if (IS_PF(bp)) { if (bp->port.pmf) @@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); } - - bp->stats_started = true; -} - -static void bnx2x_stats_start(struct bnx2x *bp) -{ - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); - __bnx2x_stats_start(bp); - up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - __bnx2x_stats_pmf_update(bp); - __bnx2x_stats_start(bp); - up(&bp->stats_sema); -} - -static void bnx2x_stats_pmf_update(struct bnx2x *bp) -{ - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); - __bnx2x_stats_pmf_update(bp); - up(&bp->stats_sema); + bnx2x_stats_pmf_update(bp); + bnx2x_stats_start(bp); } static void bnx2x_stats_restart(struct bnx2x *bp) @@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp) */ if (IS_VF(bp)) return; - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); + bnx2x_stats_comp(bp); - 
__bnx2x_stats_start(bp); - up(&bp->stats_sema); + bnx2x_stats_start(bp); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - /* we run update from timer context, so give up - * if somebody is in the middle of transition - */ - if (down_trylock(&bp->stats_sema)) + if (bnx2x_edebug_stats_stopped(bp)) return; - if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) - goto out; - if (IS_PF(bp)) { if (*stats_comp != DMAE_COMP_VAL) - goto out; + return; if (bp->port.pmf) bnx2x_hw_stats_update(bp); @@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) BNX2X_ERR("storm stats were not updated for 3 times\n"); bnx2x_panic(); } - goto out; + return; } } else { /* vf doesn't collect HW statistics, and doesn't get completions @@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) /* vf is done */ if (IS_VF(bp)) - goto out; + return; if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; @@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); - -out: - up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) static void bnx2x_stats_stop(struct bnx2x *bp) { - int update = 0; - - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); - - bp->stats_started = false; + bool update = false; bnx2x_stats_comp(bp); @@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } - - up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1410,18 +1363,28 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { - enum bnx2x_stats_state state; - void (*action)(struct bnx2x *bp); + enum bnx2x_stats_state state = bp->stats_state; + if (unlikely(bp->panic)) return; - spin_lock_bh(&bp->stats_lock); - state = bp->stats_state; + /* Statistics updates run from timer context, and we don't want to stop + * that context in case someone is in the middle of a transition. + * For other events, wait a bit until lock is taken.
+ */ + if (!mutex_trylock(&bp->stats_lock)) { + if (event == STATS_EVENT_UPDATE) + return; + + DP(BNX2X_MSG_STATS, + "Unlikely stats' lock contention [event %d]\n", event); + mutex_lock(&bp->stats_lock); + } + + bnx2x_stats_stm[state][event].action(bp); bp->stats_state = bnx2x_stats_stm[state][event].next_state; - action = bnx2x_stats_stm[state][event].action; - spin_unlock_bh(&bp->stats_lock); - action(bp); + mutex_unlock(&bp->stats_lock); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", @@ -1620,7 +1583,7 @@ void bnx2x_memset_stats(struct bnx2x *bp) if (bp->port.pmf && bp->port.port_stx) bnx2x_port_stats_base_init(bp); - /* mark the end of statistics initializiation */ + /* mark the end of statistics initialization */ bp->stats_init = false; } @@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, } } -void bnx2x_stats_safe_exec(struct bnx2x *bp, - void (func_to_exec)(void *cookie), - void *cookie){ - if (down_timeout(&bp->stats_sema, HZ/10)) - BNX2X_ERR("Unable to acquire stats lock\n"); +int bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie) +{ + int cnt = 10, rc = 0; + + /* Wait for statistics to end [while blocking further requests], + * then run supplied function 'safely'. + */ + mutex_lock(&bp->stats_lock); + bnx2x_stats_comp(bp); + while (bp->stats_pending && cnt--) + if (bnx2x_storm_stats_update(bp)) + usleep_range(1000, 2000); + if (bp->stats_pending) { + BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n"); + rc = -EBUSY; + goto out; + } + func_to_exec(cookie); - __bnx2x_stats_start(bp); - up(&bp->stats_sema); + +out: + /* No need to restart statistics - if they're enabled, the timer + * will restart the statistics. + */ + mutex_unlock(&bp->stats_lock); + + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 2beceaefdeea..965539a9dabe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -539,9 +539,9 @@ struct bnx2x; void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp); void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); -void bnx2x_stats_safe_exec(struct bnx2x *bp, - void (func_to_exec)(void *cookie), - void *cookie); +int bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie); /** * bnx2x_save_statistics - save statistics when unloading. 
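Note on the statistics locking rework above: the old stats_lock spinlock and stats_sema semaphore collapse into a single stats_lock mutex. The timer-driven STATS_EVENT_UPDATE path only trylocks and skips a tick under contention, all other events sleep on the mutex, and bnx2x_stats_safe_exec() now drains pending firmware statistics with bounded retries before running its callback, returning -EBUSY if the backlog never clears. Below is a minimal userspace sketch of that locking shape, assuming POSIX threads; stats_timer_tick, stats_event, run_safely, mark_acquired and on_link_change are invented names for illustration, not driver code.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stats_pending;	/* stand-in for bp->stats_pending */

/* Timer context: never sleep on the lock; skip this tick if it is busy. */
static void stats_timer_tick(void)
{
	if (pthread_mutex_trylock(&stats_lock) != 0)
		return;	/* someone is mid-transition; retry on the next tick */
	/* ... run the state machine's UPDATE action here ... */
	pthread_mutex_unlock(&stats_lock);
}

/* Any other event (link change, PMF transition, stop): OK to sleep. */
static void stats_event(void (*action)(void))
{
	pthread_mutex_lock(&stats_lock);
	action();
	pthread_mutex_unlock(&stats_lock);
}

/* Safe exec: block new requests, give pending completions a bounded
 * time to drain, then run the caller's function under the lock. */
static int run_safely(void (*func)(void *cookie), void *cookie)
{
	int cnt = 10, rc = 0;

	pthread_mutex_lock(&stats_lock);
	while (stats_pending && cnt--)
		usleep(1000);	/* the driver polls the FW between sleeps */
	if (stats_pending)
		rc = -EBUSY;	/* completions never arrived; skip func */
	else
		func(cookie);
	pthread_mutex_unlock(&stats_lock);
	return rc;
}

static void on_link_change(void) { /* e.g. restart statistics */ }

static void mark_acquired(void *cookie)
{
	(void)cookie;	/* e.g. flip a VF back to the ACQUIRED state */
}

int main(void)
{
	stats_timer_tick();		/* cheap, contention-tolerant path */
	stats_event(on_link_change);	/* blocking path */
	return run_safely(mark_acquired, NULL) ? 1 : 0;
}

In the driver itself the drain loop calls bnx2x_storm_stats_update() with usleep_range(1000, 2000) between tries, and nothing restarts statistics afterwards because the periodic timer does that on its own; the sketch above keeps only the locking shape.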
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index be40eabc5304..15b2d1647560 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -800,7 +800,7 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp, req->rss_key_size = T_ETH_RSS_KEY; req->rss_result_mask = params->rss_result_mask; - /* flags handled individually for backward/forward compatability */ + /* flags handled individually for backward/forward compatibility */ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) req->rss_flags |= VFPF_RSS_MODE_DISABLED; if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) @@ -1869,7 +1869,7 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, rss.rss_obj = &vf->rss_conf_obj; rss.rss_result_mask = rss_tlv->rss_result_mask; - /* flags handled individually for backward/forward compatability */ + /* flags handled individually for backward/forward compatibility */ rss.rss_flags = 0; rss.ramrod_flags = 0; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index f05fab65d78a..17c145fdf3ff 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,7 +1,7 @@ /* cnic.c: QLogic CNIC core network driver. * * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,11 +58,11 @@ #define CNIC_MODULE_NAME "cnic" static char version[] = - "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + "QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " "Chen (zongxi@broadcom.com"); -MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver"); +MODULE_DESCRIPTION("QLogic cnic Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(CNIC_MODULE_VERSION); diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8bb36c1c4d68..ef6125b0ee3e 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,7 +1,7 @@ -/* cnic_if.h: QLogic CNIC core network driver. +/* cnic_if.h: QLogic cnic core network driver.
* * Copyright (c) 2006-2014 Broadcom Corporation - * Copyright (c) 2014 QLogic Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,8 +15,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.20" -#define CNIC_MODULE_RELDATE "March 14, 2014" +#define CNIC_MODULE_VERSION "2.5.21" +#define CNIC_MODULE_RELDATE "January 29, 2015" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff83c46bc389..6043734ea613 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -54,8 +54,10 @@ /* Default highest priority queue for multi queue support */ #define GENET_Q0_PRIORITY 0 -#define GENET_DEFAULT_BD_CNT \ - (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) #define RX_BUF_LENGTH 2048 #define SKB_ALIGNMENT 32 @@ -195,6 +197,14 @@ enum dma_reg { DMA_PRIORITY_0, DMA_PRIORITY_1, DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, }; static const u8 bcmgenet_dma_regs_v3plus[] = { @@ -206,6 +216,14 @@ static const u8 bcmgenet_dma_regs_v3plus[] = { [DMA_PRIORITY_0] = 0x30, [DMA_PRIORITY_1] = 0x34, [DMA_PRIORITY_2] = 0x38, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, }; static const u8 bcmgenet_dma_regs_v2[] = { @@ -487,6 +505,7 @@ enum bcmgenet_stat_type { BCMGENET_STAT_MIB_TX, BCMGENET_STAT_RUNT, BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, }; struct bcmgenet_stats { @@ -515,6 +534,7 @@ struct bcmgenet_stats { #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) #define STAT_GENET_MISC(str, m, offset) { \ .stat_string = str, \ @@ -614,9 +634,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { UMAC_RBUF_OVFL_CNT), STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), - STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -668,6 +688,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) s = &bcmgenet_gstrings_stats[i]; switch (s->type) { case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: continue; case BCMGENET_STAT_MIB_RX: case BCMGENET_STAT_MIB_TX: @@ -826,9 +847,10 @@ static 
struct ethtool_ops bcmgenet_ethtool_ops = { }; /* Power down the unimac, based on mode. */ -static void bcmgenet_power_down(struct bcmgenet_priv *priv, +static int bcmgenet_power_down(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode) { + int ret = 0; u32 reg; switch (mode) { @@ -837,7 +859,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv, break; case GENET_POWER_WOL_MAGIC: - bcmgenet_wol_power_down_cfg(priv, mode); + ret = bcmgenet_wol_power_down_cfg(priv, mode); break; case GENET_POWER_PASSIVE: @@ -847,11 +869,15 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv, reg |= (EXT_PWR_DOWN_PHY | EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); } break; default: break; } + + return 0; } static void bcmgenet_power_up(struct bcmgenet_priv *priv, @@ -920,7 +946,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, tx_cb_ptr = ring->cbs; tx_cb_ptr += ring->write_ptr - ring->cb_ptr; - tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; + /* Advancing local write pointer */ if (ring->write_ptr == ring->end_ptr) ring->write_ptr = ring->cb_ptr; @@ -938,74 +964,87 @@ static void bcmgenet_free_cb(struct enet_cb *cb) dma_unmap_addr_set(cb, dma_addr, 0); } -static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *ring) +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) { - bcmgenet_intrl2_0_writel(priv, - UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, INTRL2_CPU_MASK_SET); } -static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *ring) +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) { - bcmgenet_intrl2_0_writel(priv, - UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, INTRL2_CPU_MASK_CLEAR); } -static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *ring) +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) { - bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), INTRL2_CPU_MASK_CLEAR); - priv->int1_mask &= ~(1 << ring->index); } -static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *ring) +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring) { - bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, INTRL2_CPU_MASK_SET); - priv->int1_mask |= (1 << ring->index); } /* Unlocked version of the reclaim 
routine */ -static void __bcmgenet_tx_reclaim(struct net_device *dev, - struct bcmgenet_tx_ring *ring) +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); - int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; - unsigned int bds_compl; + unsigned int pkts_compl = 0; unsigned int c_index; + unsigned int txbds_ready; + unsigned int txbds_processed = 0; /* Compute how many buffers are transmitted since last xmit call */ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); - txq = netdev_get_tx_queue(dev, ring->queue); - - last_c_index = ring->c_index; - num_tx_bds = ring->size; + c_index &= DMA_C_INDEX_MASK; - c_index &= (num_tx_bds - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; + if (likely(c_index >= ring->c_index)) + txbds_ready = c_index - ring->c_index; else - last_tx_cn = num_tx_bds - last_c_index + c_index; + txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index; netif_dbg(priv, tx_done, dev, - "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", - __func__, ring->index, - c_index, last_tx_cn, last_c_index); + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); /* Reclaim transmitted buffers */ - while (last_tx_cn-- > 0) { - tx_cb_ptr = ring->cbs + last_c_index; - bds_compl = 0; + while (txbds_processed < txbds_ready) { + tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; if (tx_cb_ptr->skb) { - bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; + pkts_compl++; + dev->stats.tx_packets++; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), @@ -1021,30 +1060,55 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, DMA_TO_DEVICE); dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); } - dev->stats.tx_packets++; - ring->free_bds += bds_compl; - last_c_index++; - last_c_index &= (num_tx_bds - 1); + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; } - if (ring->free_bds > (MAX_SKB_FRAGS + 1)) - ring->int_disable(priv, ring); + ring->free_bds += txbds_processed; + ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(dev, ring->queue); + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + } - ring->c_index = c_index; + return pkts_compl; } -static void bcmgenet_tx_reclaim(struct net_device *dev, +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { + unsigned int released; unsigned long flags; spin_lock_irqsave(&ring->lock, flags); - __bcmgenet_tx_reclaim(dev, ring); + released = __bcmgenet_tx_reclaim(dev, ring); spin_unlock_irqrestore(&ring->lock, flags); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + + work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; } static void bcmgenet_tx_reclaim_all(struct net_device *dev) @@ -1105,11 +1169,6 @@ static int bcmgenet_xmit_single(struct net_device *dev, dmadesc_set(priv, 
tx_cb_ptr->bd_addr, mapping, length_status); - /* Decrement total BD count and advance our write pointer */ - ring->free_bds -= 1; - ring->prod_index += 1; - ring->prod_index &= DMA_P_INDEX_MASK; - return 0; } @@ -1148,11 +1207,6 @@ static int bcmgenet_xmit_frag(struct net_device *dev, (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); - - ring->free_bds -= 1; - ring->prod_index += 1; - ring->prod_index &= DMA_P_INDEX_MASK; - return 0; } @@ -1296,121 +1350,128 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); - /* we kept a software copy of how much we should advance the TDMA - * producer index, now write it down to the hardware - */ - bcmgenet_tdma_ring_writel(priv, ring->index, - ring->prod_index, TDMA_PROD_INDEX); + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; - if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); - ring->int_enable(priv, ring); - } + if (!skb->xmit_more || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); out: spin_unlock_irqrestore(&ring->lock, flags); return ret; } - -static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) { struct device *kdev = &priv->pdev->dev; struct sk_buff *skb; + struct sk_buff *rx_skb; dma_addr_t mapping; - int ret; + /* Allocate a new Rx skb */ skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); - if (!skb) - return -ENOMEM; + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } - /* a caller did not release this control block */ - WARN_ON(cb->skb != NULL); - cb->skb = skb; - mapping = dma_map_single(kdev, skb->data, - priv->rx_buf_len, DMA_FROM_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { priv->mib.rx_dma_failed++; - bcmgenet_free_cb(cb); + dev_kfree_skb_any(skb); netif_err(priv, rx_err, priv->dev, - "%s DMA map failed\n", __func__); - return ret; + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; } - dma_unmap_addr_set(cb, dma_addr, mapping); - /* assign packet, prepare descriptor, and advance pointer */ - - dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); - - /* turn on the newly assigned BD for DMA to use */ - priv->rx_bd_assign_index++; - priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = cb->skb; + if (likely(rx_skb)) + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); - priv->rx_bd_assign_ptr = priv->rx_bds + - (priv->rx_bd_assign_index * DMA_DESC_SIZE); + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dmadesc_set_addr(priv, cb->bd_addr, mapping); - return 0; + /* Return the current Rx skb to caller */ + return rx_skb; } /* bcmgenet_desc_rx - descriptor based rx process. * this could be called from bottom half, or from NAPI polling method. 
*/ -static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int budget) { + struct bcmgenet_priv *priv = ring->priv; struct net_device *dev = priv->dev; struct enet_cb *cb; struct sk_buff *skb; u32 dma_length_status; unsigned long dma_flag; - int len, err; + int len; unsigned int rxpktprocessed = 0, rxpkttoprocess; unsigned int p_index; + unsigned int discards; unsigned int chksum_ok = 0; - p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX); + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + dev->stats.rx_missed_errors += discards; + dev->stats.rx_errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + p_index &= DMA_P_INDEX_MASK; - if (p_index < priv->rx_c_index) - rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - - priv->rx_c_index + p_index; + if (likely(p_index >= ring->c_index)) + rxpkttoprocess = p_index - ring->c_index; else - rxpkttoprocess = p_index - priv->rx_c_index; + rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + + p_index; netif_dbg(priv, rx_status, dev, "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { - cb = &priv->rx_cbs[priv->rx_read_ptr]; - skb = cb->skb; + cb = &priv->rx_cbs[ring->read_ptr]; + skb = bcmgenet_rx_refill(priv, cb); - /* We do not have a backing SKB, so we do not have a - * corresponding DMA mapping for this incoming packet since - * bcmgenet_rx_refill always either has both skb and mapping or - * none. 
- */ if (unlikely(!skb)) { dev->stats.rx_dropped++; dev->stats.rx_errors++; - goto refill; + goto next; } - /* Unmap the packet contents such that we can use the - * RSV from the 64 bytes descriptor when enabled and save - * a 32-bits register read - */ - dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), - priv->rx_buf_len, DMA_FROM_DEVICE); - if (!priv->desc_64b_en) { dma_length_status = - dmadesc_get_length_status(priv, - priv->rx_bds + - (priv->rx_read_ptr * - DMA_DESC_SIZE)); + dmadesc_get_length_status(priv, cb->bd_addr); } else { struct status_64 *status; @@ -1426,18 +1487,18 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, netif_dbg(priv, rx_status, dev, "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", - __func__, p_index, priv->rx_c_index, - priv->rx_read_ptr, dma_length_status); + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); dev->stats.rx_dropped++; dev->stats.rx_errors++; - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - goto refill; + dev_kfree_skb_any(skb); + goto next; } + /* report errors */ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | DMA_RX_OV | @@ -1456,11 +1517,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, dev->stats.rx_length_errors++; dev->stats.rx_dropped++; dev->stats.rx_errors++; - - /* discard the packet and advance consumer index.*/ - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - goto refill; + dev_kfree_skb_any(skb); + goto next; } /* error packet */ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && @@ -1492,47 +1550,61 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, dev->stats.multicast++; /* Notify kernel */ - napi_gro_receive(&priv->napi, skb); - cb->skb = NULL; + napi_gro_receive(&ring->napi, skb); netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); - /* refill RX path on the current control block */ -refill: - err = bcmgenet_rx_refill(priv, cb); - if (err) { - priv->mib.alloc_rx_buff_failed++; - netif_err(priv, rx_err, dev, "Rx refill failed\n"); - } - +next: rxpktprocessed++; - priv->rx_read_ptr++; - priv->rx_read_ptr &= (priv->num_rx_bds - 1); + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); } return rxpktprocessed; } +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + unsigned int work_done; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete(napi); + ring->int_enable(ring); + } + + return work_done; +} + /* Assign skb to RX DMA descriptor. 
*/ -static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) { struct enet_cb *cb; - int ret = 0; + struct sk_buff *skb; int i; - netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); /* loop here for each buffer needing assign */ - for (i = 0; i < priv->num_rx_bds; i++) { - cb = &priv->rx_cbs[priv->rx_bd_assign_index]; - if (cb->skb) - continue; - - ret = bcmgenet_rx_refill(priv, cb); - if (ret) - break; + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_kfree_skb_any(skb); + if (!cb->skb) + return -ENOMEM; } - return ret; + return 0; } static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) @@ -1620,7 +1692,10 @@ static int init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; int ret; - u32 reg, cpu_mask_clear; + u32 reg; + u32 int0_enable = 0; + u32 int1_enable = 0; + int i; dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); @@ -1647,16 +1722,21 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intr_disable(priv); - cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; + /* Enable Rx default queue 16 interrupts */ + int0_enable |= UMAC_IRQ_RXDMA_DONE; - dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); + /* Enable Tx default queue 16 interrupts */ + int0_enable |= UMAC_IRQ_TXDMA_DONE; /* Monitor cable plug/unplugged event for internal PHY */ if (phy_is_internal(priv->phydev)) { - cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); + int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->ext_phy) { - cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); + int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + reg = bcmgenet_bp_mc_get(priv); reg |= BIT(priv->hw_params->bp_in_en_shift); @@ -1670,9 +1750,18 @@ static int init_umac(struct bcmgenet_priv *priv) /* Enable MDIO interrupts on GENET v3+ */ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) - cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; + int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); - bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); + /* Enable Rx priority queue interrupts */ + for (i = 0; i < priv->hw_params->rx_queues; ++i) + int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); + + /* Enable Tx priority queue interrupts */ + for (i = 0; i < priv->hw_params->tx_queues; ++i) + int1_enable |= (1 << i); + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); /* Enable rx/tx engine.*/ dev_dbg(kdev, "done init umac\n"); @@ -1680,19 +1769,17 @@ static int init_umac(struct bcmgenet_priv *priv) return 0; } -/* Initialize all house-keeping variables for a TX ring, along - * with corresponding hardware registers - */ +/* Initialize a Tx ring along with corresponding hardware registers */ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int index, unsigned int size, - unsigned int write_ptr, unsigned int end_ptr) + unsigned int start_ptr, unsigned int end_ptr) { struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; u32 words_per_bd = WORDS_PER_BD(priv); u32 flow_period_val = 0; - unsigned int first_bd; spin_lock_init(&ring->lock); + ring->priv = priv; ring->index = index; if 
(index == DESC_INDEX) { ring->queue = 0; @@ -1703,12 +1790,13 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, ring->int_enable = bcmgenet_tx_ring_int_enable; ring->int_disable = bcmgenet_tx_ring_int_disable; } - ring->cbs = priv->tx_cbs + write_ptr; + ring->cbs = priv->tx_cbs + start_ptr; ring->size = size; + ring->clean_ptr = start_ptr; ring->c_index = 0; ring->free_bds = size; - ring->write_ptr = write_ptr; - ring->cb_ptr = write_ptr; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; ring->end_ptr = end_ptr - 1; ring->prod_index = 0; @@ -1722,19 +1810,16 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, /* Disable rate control for now */ bcmgenet_tdma_ring_writel(priv, index, flow_period_val, TDMA_FLOW_PERIOD); - /* Unclassified traffic goes to ring 16 */ bcmgenet_tdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); - first_bd = write_ptr; - /* Set start and end address, read and write pointers */ - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, DMA_START_ADDR); - bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_READ_PTR); - bcmgenet_tdma_ring_writel(priv, index, first_bd, + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); @@ -1742,118 +1827,302 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, /* Initialize a RDMA ring */ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, - unsigned int index, unsigned int size) + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) { + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; u32 words_per_bd = WORDS_PER_BD(priv); int ret; - priv->num_rx_bds = TOTAL_DESC; - priv->rx_bds = priv->base + priv->hw_params->rdma_offset; - priv->rx_bd_assign_ptr = priv->rx_bds; - priv->rx_bd_assign_index = 0; - priv->rx_c_index = 0; - priv->rx_read_ptr = 0; - priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), - GFP_KERNEL); - if (!priv->rx_cbs) - return -ENOMEM; + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; - ret = bcmgenet_alloc_rx_buffers(priv); - if (ret) { - kfree(priv->rx_cbs); + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) return ret; - } - bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); bcmgenet_rdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); - bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); - bcmgenet_rdma_ring_writel(priv, index, - words_per_bd * size - 1, DMA_END_ADDR); bcmgenet_rdma_ring_writel(priv, index, (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); - bcmgenet_rdma_ring_writel(priv, 
index, 0, RDMA_READ_PTR); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); return ret; } -/* init multi xmit queues, only available for GENET2+ - * the queue is partitioned as follows: +static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues * - * queue 0 - 3 is priority based, each one has 32 descriptors, + * Queues 0-3 are priority-based, each one has 32 descriptors, * with queue 0 being the highest priority queue. * - * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT - * descriptors: 256 - (number of tx queues * bds per queues) = 128 - * descriptors. + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
* - * The transmit control block pool is then partitioned as following: - * - tx_cbs[0...127] are for queue 16 - * - tx_ring_cbs[0] points to tx_cbs[128..159] - * - tx_ring_cbs[1] points to tx_cbs[160..191] - * - tx_ring_cbs[2] points to tx_cbs[192..223] - * - tx_ring_cbs[3] points to tx_cbs[224..255] + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] */ -static void bcmgenet_init_multiq(struct net_device *dev) +static void bcmgenet_init_tx_queues(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned int i, dma_enable; - u32 reg, dma_ctrl, ring_cfg = 0; + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; u32 dma_priority[3] = {0, 0, 0}; - if (!netif_is_multiqueue(dev)) { - netdev_warn(dev, "called with non multi queue aware HW\n"); - return; - } - dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); dma_enable = dma_ctrl & DMA_EN; dma_ctrl &= ~DMA_EN; bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + dma_ctrl = 0; + ring_cfg = 0; + /* Enable strict priority arbiter mode */ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + /* Initialize Tx priority queues */ for (i = 0; i < priv->hw_params->tx_queues; i++) { - /* first 64 tx_cbs are reserved for default tx queue - * (ring 16) - */ - bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, - i * priv->hw_params->bds_cnt, - (i + 1) * priv->hw_params->bds_cnt); - - /* Configure ring as descriptor ring and setup priority */ - ring_cfg |= 1 << i; - dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); - + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(i)] |= ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); } - /* Set ring 16 priority and program the hardware registers */ + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + /* Initialize Tx NAPI */ + bcmgenet_init_tx_napi(priv); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + 
napi_enable(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Initialize Rx NAPI */ + bcmgenet_init_rx_napi(priv); + /* Enable rings */ - reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); - reg |= ring_cfg; - bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); /* Configure ring as descriptor ring and re-enable DMA if enabled */ - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg |= dma_ctrl; if (dma_enable) - reg |= DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; } static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) @@ -1911,6 +2180,9 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + /* disable DMA */ bcmgenet_dma_teardown(priv); @@ -1930,21 +2202,23 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) static int bcmgenet_init_dma(struct bcmgenet_priv *priv) { int ret; + unsigned int i; + struct enet_cb *cb; - netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); - - /* by default, enable ring 16 (descriptor based) */ - ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); - if (ret) { - netdev_err(priv->dev, "failed to initialize RX ring\n"); - return ret; - } + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); - /* init rDma */ - bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + 
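[Editor's aside — not part of the patch.] A sketch of the ring-enable masks that bcmgenet_init_rx_queues() above (and its Tx counterpart) accumulate in ring_cfg and dma_ctrl before the final DMA_RING_CFG/DMA_CTRL writes. DESC_INDEX (ring 16) matches the driver; DMA_RING_BUF_EN_SHIFT's value and the four-queue setup are assumptions made purely for illustration — the hw_params in this patch actually leave rx_queues at 0:

#include <stdint.h>
#include <stdio.h>

#define DESC_INDEX		16	/* default descriptor ring */
#define DMA_RING_BUF_EN_SHIFT	1	/* assumed value, illustration only */

int main(void)
{
	uint32_t ring_cfg = 0, dma_ctrl = 0;
	unsigned int i, rx_queues = 4;	/* hypothetical Rx priority queues */

	for (i = 0; i < rx_queues; i++) {
		ring_cfg |= 1u << i;
		dma_ctrl |= 1u << (i + DMA_RING_BUF_EN_SHIFT);
	}
	/* ... and always the default ring 16 */
	ring_cfg |= 1u << DESC_INDEX;
	dma_ctrl |= 1u << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT);

	printf("DMA_RING_CFG = 0x%05x\n", (unsigned)ring_cfg);
	printf("DMA_CTRL    |= 0x%05x\n", (unsigned)dma_ctrl);
	return 0;
}

One bit per ring in DMA_RING_CFG, and the matching ring-buffer-enable bit in DMA_CTRL; DMA_EN is OR'd back in only if DMA was running before the reconfiguration.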
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; - /* Init tDma */ - bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } /* Initialize common TX ring structures */ priv->tx_bds = priv->base + priv->hw_params->tdma_offset; @@ -1952,46 +2226,35 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), GFP_KERNEL); if (!priv->tx_cbs) { - bcmgenet_fini_dma(priv); + kfree(priv->rx_cbs); return -ENOMEM; } - /* initialize multi xmit queue */ - bcmgenet_init_multiq(priv->dev); - - /* initialize special ring 16 */ - bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, - priv->hw_params->tx_queues * - priv->hw_params->bds_cnt, - TOTAL_DESC); - - return 0; -} + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } -/* NAPI polling method*/ -static int bcmgenet_poll(struct napi_struct *napi, int budget) -{ - struct bcmgenet_priv *priv = container_of(napi, - struct bcmgenet_priv, napi); - unsigned int work_done; + /* Init rDma */ + bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); - /* tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } - work_done = bcmgenet_desc_rx(priv, budget); + /* Init tDma */ + bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); - /* Advancing our consumer index*/ - priv->rx_c_index += work_done; - priv->rx_c_index &= DMA_C_INDEX_MASK; - bcmgenet_rdma_ring_writel(priv, DESC_INDEX, - priv->rx_c_index, RDMA_CONS_INDEX); - if (work_done < budget) { - napi_complete(napi); - bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, - INTRL2_CPU_MASK_CLEAR); - } + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); - return work_done; + return 0; } /* Interrupt bottom half */ @@ -2011,77 +2274,100 @@ static void bcmgenet_irq_task(struct work_struct *work) /* Link UP/DOWN event */ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && - (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { + (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { phy_mac_interrupt(priv->phydev, - priv->irq0_stat & UMAC_IRQ_LINK_UP); - priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); + !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); + priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; } } -/* bcmgenet_isr1: interrupt handler for ring buffer. */ +/* bcmgenet_isr1: handle Rx and Tx priority queues */ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) { struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; unsigned int index; /* Save irq status for bottom-half processing. */ priv->irq1_stat = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & - ~priv->int1_mask; + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + /* clear interrupts */ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); - /* Check the MBDONE interrupts. 
- * packet is done, reclaim descriptors - */ - if (priv->irq1_stat & 0x0000ffff) { - index = 0; - for (index = 0; index < 16; index++) { - if (priv->irq1_stat & (1 << index)) - bcmgenet_tx_reclaim(priv->dev, - &priv->tx_rings[index]); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(priv->irq1_stat & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule(&tx_ring->napi); } } + return IRQ_HANDLED; } -/* bcmgenet_isr0: Handle various interrupts. */ +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) { struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; /* Save irq status for bottom-half processing. */ priv->irq0_stat = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + /* clear interrupts */ bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, "IRQ=0x%x\n", priv->irq0_stat); - if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { - /* We use NAPI(software interrupt throttling, if - * Rx Descriptor throttling is not used. - * Disable interrupt, will be enabled in the poll method. - */ - if (likely(napi_schedule_prep(&priv->napi))) { - bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, - INTRL2_CPU_MASK_SET); - __napi_schedule(&priv->napi); + if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule(&rx_ring->napi); } } - if (priv->irq0_stat & - (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { - /* Tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + + if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule(&tx_ring->napi); + } } + if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | UMAC_IRQ_PHY_DET_F | - UMAC_IRQ_LINK_UP | - UMAC_IRQ_LINK_DOWN | + UMAC_IRQ_LINK_EVENT | UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | UMAC_IRQ_MPD_R)) { @@ -2165,18 +2451,170 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) bcmgenet_tdma_writel(priv, reg, DMA_CTRL); } +static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, + u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + return !!(reg & (1 << (f_index % 32))); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 
rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) +{ + u32 f_index; + + for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) + if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) + return f_index; + + return -ENOMEM; +} + +/* bcmgenet_hfb_add_filter + * + * Add new filter to Hardware Filter Block to match and direct Rx traffic to + * desired Rx queue. + * + * f_data is an array of unsigned 32-bit integers where each 32-bit integer + * provides filter data for 2 bytes (4 nibbles) of Rx frame: + * + * bits 31:20 - unused + * bit 19 - nibble 0 match enable + * bit 18 - nibble 1 match enable + * bit 17 - nibble 2 match enable + * bit 16 - nibble 3 match enable + * bits 15:12 - nibble 0 data + * bits 11:8 - nibble 1 data + * bits 7:4 - nibble 2 data + * bits 3:0 - nibble 3 data + * + * Example: + * In order to match: + * - Ethernet frame type = 0x0800 (IP) + * - IP version field = 4 + * - IP protocol field = 0x11 (UDP) + * + * The following filter is needed: + * u32 hfb_filter_ipv4_udp[] = { + * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, + * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, + * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, + * }; + * + * To add the filter to HFB and direct the traffic to Rx queue 0, call: + * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, + * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); + */ +int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, + u32 f_length, u32 rx_queue) +{ + int f_index; + u32 i; + + f_index = bcmgenet_hfb_find_unused_filter(priv); + if (f_index < 0) + return -ENOMEM; + + if (f_length > priv->hw_params->hfb_filter_size) + return -EINVAL; + + for (i = 0; i < f_length; i++) + bcmgenet_hfb_writel(priv, f_data[i], + (f_index * priv->hw_params->hfb_filter_size + i) * + sizeof(u32)); + + bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); + bcmgenet_hfb_enable_filter(priv, f_index); + bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); + + return 0; +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. 
+ */ +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt * + priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_clear(priv); +} + static void bcmgenet_netif_start(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ - napi_enable(&priv->napi); + bcmgenet_enable_rx_napi(priv); + bcmgenet_enable_tx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); - if (phy_is_internal(priv->phydev)) - bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - netif_tx_start_all_queues(dev); phy_start(priv->phydev); @@ -2195,6 +2633,12 @@ static int bcmgenet_open(struct net_device *dev) if (!IS_ERR(priv->clk)) clk_prepare_enable(priv->clk); + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (phy_is_internal(priv->phydev)) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + /* take MAC out of reset */ bcmgenet_umac_reset(priv); @@ -2224,12 +2668,15 @@ static int bcmgenet_open(struct net_device *dev) ret = bcmgenet_init_dma(priv); if (ret) { netdev_err(dev, "failed to initialize DMA\n"); - goto err_fini_dma; + goto err_clk_disable; } /* Always enable ring 16 - descriptor ring */ bcmgenet_enable_dma(priv, dma_ctrl); + /* HFB init */ + bcmgenet_hfb_init(priv); + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, dev->name, priv); if (ret < 0) { @@ -2269,10 +2716,10 @@ static void bcmgenet_netif_stop(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); netif_tx_stop_all_queues(dev); - napi_disable(&priv->napi); phy_stop(priv->phydev); - bcmgenet_intr_disable(priv); + bcmgenet_disable_rx_napi(priv); + bcmgenet_disable_tx_napi(priv); /* Wait for pending work items to complete. Since interrupts are * disabled no new work will be scheduled. 
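[Editor's aside — not part of the patch.] The nibble layout documented above bcmgenet_hfb_add_filter() can be exercised with a small builder. This sketch packs one HFB filter word from two frame bytes plus a 4-bit per-nibble enable mask (bit 3 of the mask enables nibble 0, i.e. HFB bit 19); hfb_pack_word is a hypothetical helper, not driver API. With all four nibbles enabled it reproduces the 0x000F0800 EtherType word from the worked example in that comment:

#include <stdint.h>
#include <stdio.h>

static uint32_t hfb_pack_word(uint8_t b0, uint8_t b1, unsigned int nibble_en)
{
	/* bits 15:12 = nibble 0 (high nibble of b0) ... bits 3:0 = nibble 3 */
	uint32_t data = ((uint32_t)b0 << 8) | b1;

	/* bits 19:16 = per-nibble match enables, bit 19 enabling nibble 0 */
	return ((uint32_t)(nibble_en & 0xF) << 16) | data;
}

int main(void)
{
	/* Match EtherType 0x0800 (IP), all four nibbles significant */
	printf("0x%08x\n", (unsigned)hfb_pack_word(0x08, 0x00, 0xF));
	return 0;
}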
@@ -2315,12 +2762,12 @@ static int bcmgenet_close(struct net_device *dev) free_irq(priv->irq1, priv); if (phy_is_internal(priv->phydev)) - bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); - return 0; + return ret; } static void bcmgenet_timeout(struct net_device *dev) @@ -2437,8 +2884,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = { static struct bcmgenet_hw_params bcmgenet_hw_params[] = { [GENET_V1] = { .tx_queues = 0, + .tx_bds_per_q = 0, .rx_queues = 0, - .bds_cnt = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2450,8 +2898,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V2] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 16, .bp_in_mask = 0xffff, .hfb_filter_cnt = 16, @@ -2466,11 +2915,13 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { }, [GENET_V3] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, + .hfb_filter_size = 128, .qtag_mask = 0x3F, .tbuf_offset = 0x0600, .hfb_offset = 0x8000, @@ -2478,15 +2929,18 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { .rdma_offset = 0x10000, .tdma_offset = 0x11000, .words_per_bd = 2, - .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, }, [GENET_V4] = { .tx_queues = 4, - .rx_queues = 4, - .bds_cnt = 32, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, + .hfb_filter_size = 128, .qtag_mask = 0x3F, .tbuf_offset = 0x0600, .hfb_offset = 0x8000, @@ -2494,7 +2948,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { .rdma_offset = 0x2000, .tdma_offset = 0x4000, .words_per_bd = 3, - .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, }, }; @@ -2583,14 +3038,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) #endif pr_debug("Configuration for version: %d\n" - "TXq: %1d, RXq: %1d, BDs: %1d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" "BP << en: %2d, BP msk: 0x%05x\n" "HFB count: %2d, QTAQ msk: 0x%05x\n" "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" "RDMA: 0x%05x, TDMA: 0x%05x\n" "Words/BD: %d\n", priv->version, - params->tx_queues, params->rx_queues, params->bds_cnt, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, params->bp_in_en_shift, params->bp_in_mask, params->hfb_filter_cnt, params->qtag_mask, params->tbuf_offset, params->hfb_offset, @@ -2618,8 +3074,9 @@ static int bcmgenet_probe(struct platform_device *pdev) struct resource *r; int err = -EIO; - /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ - dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); if (!dev) { dev_err(&pdev->dev, "can't allocate net device\n"); return -ENOMEM; @@ -2665,7 +3122,6 @@ static int bcmgenet_probe(struct platform_device *pdev) dev->watchdog_timeo = 2 * HZ; dev->ethtool_ops = &bcmgenet_ethtool_ops; dev->netdev_ops = &bcmgenet_netdev_ops; - 
netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); @@ -2798,14 +3254,16 @@ static int bcmgenet_suspend(struct device *d) /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { - bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); clk_prepare_enable(priv->clk_wol); + } else if (phy_is_internal(priv->phydev)) { + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); } /* Turn off the clocks */ clk_disable_unprepare(priv->clk); - return 0; + return ret; } static int bcmgenet_resume(struct device *d) @@ -2824,6 +3282,12 @@ static int bcmgenet_resume(struct device *d) if (ret) return ret; + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (phy_is_internal(priv->phydev)) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + bcmgenet_umac_reset(priv); ret = init_umac(priv); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b36ddec0cc0a..6f2887a5e0be 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -293,6 +293,7 @@ struct bcmgenet_mib_counters { #define UMAC_IRQ_PHY_DET_F (1 << 3) #define UMAC_IRQ_LINK_UP (1 << 4) #define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) #define UMAC_IRQ_UMAC (1 << 6) #define UMAC_IRQ_UMAC_TSV (1 << 7) #define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) @@ -303,13 +304,22 @@ struct bcmgenet_mib_counters { #define UMAC_IRQ_RXDMA_MBDONE (1 << 13) #define UMAC_IRQ_RXDMA_PDONE (1 << 14) #define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE (UMAC_IRQ_RXDMA_PDONE | \ + UMAC_IRQ_RXDMA_BDONE) #define UMAC_IRQ_TXDMA_MBDONE (1 << 16) #define UMAC_IRQ_TXDMA_PDONE (1 << 17) #define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE (UMAC_IRQ_TXDMA_PDONE | \ + UMAC_IRQ_TXDMA_BDONE) /* Only valid for GENETv3+ */ #define UMAC_IRQ_MDIO_DONE (1 << 23) #define UMAC_IRQ_MDIO_ERROR (1 << 24) +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + /* Register block offsets */ #define GENET_SYS_OFF 0x0000 #define GENET_GR_BRIDGE_OFF 0x0040 @@ -354,6 +364,7 @@ struct bcmgenet_mib_counters { #define EXT_GPHY_CTRL 0x1C #define EXT_CFG_IDDQ_BIAS (1 << 0) #define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) #define EXT_GPHY_RESET (1 << 5) /* DMA rings size */ @@ -497,17 +508,20 @@ enum bcmgenet_version { #define GENET_HAS_40BITS (1 << 0) #define GENET_HAS_EXT (1 << 1) #define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) /* BCMGENET hardware parameters, keep this structure nicely aligned * since it is going to be used in hot paths */ struct bcmgenet_hw_params { u8 tx_queues; + u8 tx_bds_per_q; u8 rx_queues; - u8 bds_cnt; + u8 rx_bds_per_q; u8 bp_in_en_shift; u32 bp_in_mask; u8 hfb_filter_cnt; + u8 hfb_filter_size; u8 qtag_mask; u16 tbuf_offset; u32 hfb_offset; @@ -520,20 +534,36 @@ struct bcmgenet_hw_params { struct bcmgenet_tx_ring { spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ unsigned int index; /* ring index */ unsigned int queue; /* queue index */ struct enet_cb *cbs; /* tx ring buffer control block*/ unsigned int size; /* size of each tx ring */ + unsigned int 
clean_ptr; /* Tx ring clean pointer */ unsigned int c_index; /* last consumer index of each ring*/ unsigned int free_bds; /* # of free bds for each ring */ unsigned int write_ptr; /* Tx ring write pointer SW copy */ unsigned int prod_index; /* Tx ring producer index SW copy */ unsigned int cb_ptr; /* Tx ring initial CB ptr */ unsigned int end_ptr; /* Tx ring end CB ptr */ - void (*int_enable)(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *); - void (*int_disable)(struct bcmgenet_priv *priv, - struct bcmgenet_tx_ring *); + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; }; /* device context */ @@ -541,11 +571,6 @@ struct bcmgenet_priv { void __iomem *base; enum bcmgenet_version version; struct net_device *dev; - u32 int0_mask; - u32 int1_mask; - - /* NAPI for descriptor based rx */ - struct napi_struct napi ____cacheline_aligned; /* transmit variables */ void __iomem *tx_bds; @@ -556,13 +581,11 @@ struct bcmgenet_priv { /* receive variables */ void __iomem *rx_bds; - void __iomem *rx_bd_assign_ptr; - int rx_bd_assign_index; struct enet_cb *rx_cbs; unsigned int num_rx_bds; unsigned int rx_buf_len; - unsigned int rx_read_ptr; - unsigned int rx_c_index; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; /* other misc variables */ struct bcmgenet_hw_params *hw_params; @@ -649,6 +672,7 @@ int bcmgenet_mii_init(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); /* Wake-on-LAN routines */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (wol->wolopts & WAKE_MAGICSECURE) { bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), UMAC_MPD_PW_MS); bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), UMAC_MPD_PW_LS); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_PW_EN; - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } else { + reg &= ~MPD_PW_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 446889cc3c6a..e7651b3c6c57 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -168,7 +168,7 
@@ void bcmgenet_mii_reset(struct net_device *dev) } } -static void bcmgenet_ephy_power_up(struct net_device *dev) +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 reg = 0; @@ -178,14 +178,25 @@ static void bcmgenet_ephy_power_up(struct net_device *dev) return; reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); - reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); - reg |= EXT_GPHY_RESET; - bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); - mdelay(2); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); - reg &= ~EXT_GPHY_RESET; + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); - udelay(20); + udelay(60); } static void bcmgenet_internal_phy_setup(struct net_device *dev) @@ -193,8 +204,8 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); u32 reg; - /* Power up EPHY */ - bcmgenet_ephy_power_up(dev); + /* Power up PHY */ + bcmgenet_phy_power_set(dev, true); /* enable APD */ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_PWR_DN_EN_LD; @@ -451,6 +462,15 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) return 0; } +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + if (dev && dev->phydev && status) + status->link = dev->phydev->link; + + return 0; +} + static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -502,6 +522,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) { + ret = fixed_phy_set_link_update( + phydev, bcmgenet_fixed_phy_link_update); + if (!ret) + phydev->link = 0; + } } priv->phydev = phydev; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 23a019cee279..1270b189a9a2 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6217,10 +6217,9 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); tg3_full_lock(tp, 0); @@ -6228,19 +6227,18 @@ static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) ns += tp->ptp_adjust; tg3_full_unlock(tp); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } static int tg3_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { u64 ns; struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); - ns = timespec_to_ns(ts); + ns = timespec64_to_ns(ts); tg3_full_lock(tp, 0); tg3_refclk_write(tp, ns); @@ -6320,8 +6318,8 @@ static const struct ptp_clock_info tg3_ptp_caps = { .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, - .gettime = tg3_ptp_gettime, - .settime = tg3_ptp_settime, + .gettime64 = 
tg3_ptp_gettime, + .settime64 = tg3_ptp_settime, .enable = tg3_ptp_enable, }; @@ -7244,7 +7242,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) if (tnapi == &tp->napi[1] && tp->rx_refill) continue; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts. */ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); @@ -7337,7 +7335,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) sblk->status &= ~SD_STATUS_UPDATED; if (likely(!tg3_has_work(tnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); tg3_int_reenable(tnapi); break; } diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig index 264155778857..4e8c0b6c57d0 100644 --- a/drivers/net/ethernet/brocade/Kconfig +++ b/drivers/net/ethernet/brocade/Kconfig @@ -1,9 +1,9 @@ # -# Brocade device configuration +# QLogic BR-series device configuration # config NET_VENDOR_BROCADE - bool "Brocade devices" + bool "QLogic BR-series devices" default y depends on PCI ---help--- @@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Brocade cards. If you say Y, you will be asked for - your specific card in the following questions. + the questions about QLogic BR-series cards. If you say Y, you will be + asked for your specific card in the following questions. if NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile index b58238d2df6a..fec10f9b4558 100644 --- a/drivers/net/ethernet/brocade/Makefile +++ b/drivers/net/ethernet/brocade/Makefile @@ -1,5 +1,5 @@ # -# Makefile for the Brocade device drivers. +# Makefile for the QLogic BR-series device drivers. # obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig index dc2eb526fbf7..fe01279a8843 100644 --- a/drivers/net/ethernet/brocade/bna/Kconfig +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -1,17 +1,17 @@ # -# Brocade network device configuration +# QLogic BR-series network device configuration # config BNA - tristate "Brocade 1010/1020 10Gb Ethernet Driver support" + tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" depends on PCI ---help--- - This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet - cards. + This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable + Ethernet cards. To compile this driver as a module, choose M here: the module will be called bna. - For general information and support, go to the Brocade support + For general information and support, go to the QLogic support website at: - <http://support.brocade.com> + <http://support.qlogic.com> diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 6027302ae73a..6e10b99733a2 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -1,5 +1,6 @@ # -# Copyright (c) 2005-2010 Brocade Communications Systems, Inc. +# Copyright (c) 2005-2014 Brocade Communications Systems, Inc. +# Copyright (c) 2014-2015 QLogic Corporation. # All rights reserved. 
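[Editor's aside — not part of the patch.] The tg3 PTP hunks above replace an open-coded div_u64_rem() second/nanosecond split with ns_to_timespec64(). A minimal userspace sketch of that same arithmetic, using stand-in types since the kernel helpers are not available here; negative timestamps, which the real helper also handles, are ignored for brevity:

#include <stdint.h>
#include <stdio.h>

struct timespec64_sketch {
	int64_t	tv_sec;
	long	tv_nsec;
};

/* Stand-in for ns_to_timespec64(), non-negative input only */
static struct timespec64_sketch ns_to_timespec64_sketch(uint64_t ns)
{
	struct timespec64_sketch ts;

	ts.tv_sec = (int64_t)(ns / 1000000000ULL);  /* old div_u64_rem() quotient */
	ts.tv_nsec = (long)(ns % 1000000000ULL);    /* old "remainder" out-param */
	return ts;
}

int main(void)
{
	struct timespec64_sketch ts = ns_to_timespec64_sketch(1500000001ULL);

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

The helper-based form also drops the u32 remainder local, which is why both gettime conversions shrink to a single assignment in the diff.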
# diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 550d2521ba76..cf9f3956f198 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_cee.h" diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h index 93fde633d6f3..d04eef5d5a77 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_CEE_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index ad004a4c3897..af25d8e8fae0 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BFA common services */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index b7d8127c198f..3bfd9da92630 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index b39c5f23974b..a37326d44fbb 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_CNA_H__ #define __BFA_DEFS_CNA_H__ @@ -134,7 +135,7 @@ struct bfa_cee_lldp_str { u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; }; -/* LLDP paramters */ +/* LLDP parameters */ struct bfa_cee_lldp_cfg { struct bfa_cee_lldp_str chassis_id; struct bfa_cee_lldp_str port_id; diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 7fb396fe679d..7a45cd0b594d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_MFG_COMM_H__ #define __BFA_DEFS_MFG_COMM_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index ea9af9ae754d..a43b56002752 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_DEFS_STATUS_H__ #define __BFA_DEFS_STATUS_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 354ae9792bad..594a2ab36d31 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. 
+ * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" @@ -1339,7 +1340,7 @@ bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, return true; } -/* Returns TRUE if major minor and maintainence are same. +/* Returns TRUE if major minor and maintenance are same. * If patch version are same, check for MD5 Checksum to be same. */ static bool @@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, list_add_tail(¬ify->qe, &ioc->notify_q); } -#define BFA_MFG_NAME "Brocade" +#define BFA_MFG_NAME "QLogic" static void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, struct bfa_adapter_attr *ad_attr) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 20cff7df4b55..effb7156e7a4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_IOC_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index d639558455cb..2e72445dbb4f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bfa_ioc.h" @@ -698,7 +699,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb) /* * Ignore mode and program for the max clock (which is FC16) - * Firmware/NFC will do the PLL init appropiately + * Firmware/NFC will do the PLL init appropriately */ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index 55067d0d25cf..c07d5b9372f4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. 
+ * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* MSGQ module source file. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h index a6a565a366dc..66bc8b5acd57 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFA_MSGQ_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 8c563a77cdf6..2bcde4042268 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_H__ #define __BFI_H__ @@ -158,8 +159,8 @@ enum bfi_asic_gen { }; enum bfi_asic_mode { - BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */ - BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */ + BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ }; diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 6704a4392973..bd605bee72ee 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BFI_CNA_H__ #define __BFI_CNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index ae072dc5d238..bccca3bbadb8 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* BNA Hardware and Firmware Interface */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index c49fa312ddbd..2835b51eabec 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,13 +11,14 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* - * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs */ #ifndef __BFI_REG_H__ @@ -221,7 +222,7 @@ enum { #define __PMM_1T_RESET_P 0x00000001 #define PMM_1T_RESET_REG_P1 0x00023c1c -/* Brocade 1860 Adapter specific defines */ +/* QLogic BR-series 1860 Adapter specific defines */ #define CT2_PCI_CPQ_BASE 0x00030000 #define CT2_PCI_APP_BASE 0x00030100 #define CT2_PCI_ETH_BASE 0x00030400 @@ -264,7 +265,7 @@ enum { #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) /* - * Brocade 1860 adapter CPQ block registers + * QLogic BR-series 1860 adapter CPQ block registers */ #define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) #define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 1f512190d696..8ba72b1f36d9 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_H__ #define __BNA_H__ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 903466ef41c0..deb8da6ab9cc 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 2702d02e98d9..174af0e9d056 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ /* File for interrupt macros and functions */ @@ -362,7 +363,7 @@ struct bna_txq_wi_vector { /* TxQ Entry Structure * - * BEWARE: Load values into this structure with correct endianess. + * BEWARE: Load values into this structure with correct endianness. */ struct bna_txq_entry { union { diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5fac411c52f4..8ab3a5f62706 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "bna.h" #include "bfi.h" diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index 621547cd3504..d0a7a566f5d6 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNA_TYPES_H__ #define __BNA_TYPES_H__ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7714d7790089..37072a83f9d6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/bitops.h> #include <linux/netdevice.h> @@ -3867,7 +3868,7 @@ bnad_module_init(void) { int err; - pr_info("Brocade 10G Ethernet driver - version: %s\n", + pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", BNAD_VERSION); bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); @@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit); MODULE_AUTHOR("Brocade"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); +MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); MODULE_VERSION(BNAD_VERSION); MODULE_FIRMWARE(CNA_FW_FILE_CT); MODULE_FIRMWARE(CNA_FW_FILE_CT2); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 2842c188e0da..7ead6c23edb6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __BNAD_H__ #define __BNAD_H__ @@ -71,7 +72,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.2.23.0" +#define BNAD_VERSION "3.2.25.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 619083a860a4..72c89550417c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/debugfs.h> diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d26adac6ab99..12f344debd1c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include "cna.h" diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index b3ff6d507951..28e7d0ffeab1 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2006-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #ifndef __CNA_H__ @@ -37,8 +38,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index 6f72771caea6..ebf462d8082f 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -1,5 +1,5 @@ /* - * Linux network driver for Brocade Converged Network Adapter. + * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -11,9 +11,10 @@ * General Public License for more details. */ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved - * www.brocade.com + * www.qlogic.com */ #include <linux/firmware.h> #include "bnad.h" diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 321d2ad235d9..1ba3e3a67389 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -4,7 +4,7 @@ config NET_CADENCE bool "Cadence devices" - depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST) + depends on HAS_IOMEM default y ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -20,17 +20,9 @@ config NET_CADENCE if NET_CADENCE -config ARM_AT91_ETHER - tristate "AT91RM9200 Ethernet support" - depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST) - select MACB - ---help--- - If you wish to compile a kernel for the AT91RM9200 and enable - ethernet support, then you should always answer Y to this. - config MACB tristate "Cadence MACB/GEM support" - depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST) + depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile index 9068b8331ed1..91f79b1f0505 100644 --- a/drivers/net/ethernet/cadence/Makefile +++ b/drivers/net/ethernet/cadence/Makefile @@ -2,5 +2,4 @@ # Makefile for the Atmel network device drivers. # -obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o obj-$(CONFIG_MACB) += macb.o diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c deleted file mode 100644 index 7ef55f5fa664..000000000000 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ /dev/null @@ -1,481 +0,0 @@ -/* - * Ethernet driver for the Atmel AT91RM9200 (Thunder) - * - * Copyright (C) 2003 SAN People (Pty) Ltd - * - * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. - * Initial version by Rick Bronson 01/11/2003 - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <linux/ethtool.h> -#include <linux/platform_data/macb.h> -#include <linux/platform_device.h> -#include <linux/clk.h> -#include <linux/gfp.h> -#include <linux/phy.h> -#include <linux/io.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_net.h> - -#include "macb.h" - -/* 1518 rounded up */ -#define MAX_RBUFF_SZ 0x600 -/* max number of receive buffers */ -#define MAX_RX_DESCR 9 - -/* Initialize and start the Receiver and Transmit subsystems */ -static int at91ether_start(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - dma_addr_t addr; - u32 ctl; - int i; - - lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, - (MAX_RX_DESCR * - sizeof(struct macb_dma_desc)), - &lp->rx_ring_dma, GFP_KERNEL); - if (!lp->rx_ring) - return -ENOMEM; - - lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, - MAX_RX_DESCR * MAX_RBUFF_SZ, - &lp->rx_buffers_dma, GFP_KERNEL); - if (!lp->rx_buffers) { - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; - return -ENOMEM; - } - - addr = lp->rx_buffers_dma; - for (i = 0; i < MAX_RX_DESCR; i++) { - lp->rx_ring[i].addr = addr; - lp->rx_ring[i].ctrl = 0; - addr += MAX_RBUFF_SZ; - } - - /* Set the Wrap bit on the last descriptor */ - lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); - - /* Reset buffer index */ - lp->rx_tail = 0; - - /* Program address of descriptor list in Rx Buffer Queue register */ - macb_writel(lp, RBQP, lp->rx_ring_dma); - - /* Enable Receive and Transmit */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); - - return 0; -} - -/* Open the ethernet interface */ -static int at91ether_open(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - u32 ctl; - int ret; - - /* Clear internal statistics */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); - - macb_set_hwaddr(lp); - - ret = at91ether_start(dev); - if (ret) - return ret; - - /* Enable MAC interrupts */ - macb_writel(lp, IER, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); - - /* schedule a link state check */ - phy_start(lp->phy_dev); - - netif_start_queue(dev); - - return 0; -} - -/* Close the interface */ -static int at91ether_close(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - u32 ctl; - - /* Disable Receiver and Transmitter */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); - - /* Disable MAC interrupts */ - macb_writel(lp, IDR, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); - - netif_stop_queue(dev); - - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; - - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * MAX_RBUFF_SZ, - lp->rx_buffers, lp->rx_buffers_dma); - lp->rx_buffers = NULL; - - return 0; -} - -/* Transmit packet */ -static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - - if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { - netif_stop_queue(dev); - - /* Store 
packet information (to free when Tx completed) */ - lp->skb = skb; - lp->skb_length = skb->len; - lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, - DMA_TO_DEVICE); - - /* Set address of the data in the Transmit Address register */ - macb_writel(lp, TAR, lp->skb_physaddr); - /* Set length of the packet in the Transmit Control register */ - macb_writel(lp, TCR, skb->len); - - } else { - netdev_err(dev, "%s called, but device is busy!\n", __func__); - return NETDEV_TX_BUSY; - } - - return NETDEV_TX_OK; -} - -/* Extract received frame from buffer descriptors and sent to upper layers. - * (Called from interrupt context) - */ -static void at91ether_rx(struct net_device *dev) -{ - struct macb *lp = netdev_priv(dev); - unsigned char *p_recv; - struct sk_buff *skb; - unsigned int pktlen; - - while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { - p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ; - pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); - skb = netdev_alloc_skb(dev, pktlen + 2); - if (skb) { - skb_reserve(skb, 2); - memcpy(skb_put(skb, pktlen), p_recv, pktlen); - - skb->protocol = eth_type_trans(skb, dev); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pktlen; - netif_rx(skb); - } else { - lp->stats.rx_dropped++; - } - - if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) - lp->stats.multicast++; - - /* reset ownership bit */ - lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); - - /* wrap after last buffer */ - if (lp->rx_tail == MAX_RX_DESCR - 1) - lp->rx_tail = 0; - else - lp->rx_tail++; - } -} - -/* MAC interrupt handler */ -static irqreturn_t at91ether_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct macb *lp = netdev_priv(dev); - u32 intstatus, ctl; - - /* MAC Interrupt Status register indicates what interrupts are pending. - * It is automatically cleared once read. 
- */ - intstatus = macb_readl(lp, ISR); - - /* Receive complete */ - if (intstatus & MACB_BIT(RCOMP)) - at91ether_rx(dev); - - /* Transmit complete */ - if (intstatus & MACB_BIT(TCOMP)) { - /* The TCOM bit is set even if the transmission failed */ - if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) - lp->stats.tx_errors++; - - if (lp->skb) { - dev_kfree_skb_irq(lp->skb); - lp->skb = NULL; - dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); - lp->stats.tx_packets++; - lp->stats.tx_bytes += lp->skb_length; - } - netif_wake_queue(dev); - } - - /* Work-around for EMAC Errata section 41.3.1 */ - if (intstatus & MACB_BIT(RXUBR)) { - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); - macb_writel(lp, NCR, ctl | MACB_BIT(RE)); - } - - if (intstatus & MACB_BIT(ISR_ROVR)) - netdev_err(dev, "ROVR error\n"); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void at91ether_poll_controller(struct net_device *dev) -{ - unsigned long flags; - - local_irq_save(flags); - at91ether_interrupt(dev->irq, dev); - local_irq_restore(flags); -} -#endif - -static const struct net_device_ops at91ether_netdev_ops = { - .ndo_open = at91ether_open, - .ndo_stop = at91ether_close, - .ndo_start_xmit = at91ether_start_xmit, - .ndo_get_stats = macb_get_stats, - .ndo_set_rx_mode = macb_set_rx_mode, - .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = macb_ioctl, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = at91ether_poll_controller, -#endif -}; - -#if defined(CONFIG_OF) -static const struct of_device_id at91ether_dt_ids[] = { - { .compatible = "cdns,at91rm9200-emac" }, - { .compatible = "cdns,emac" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, at91ether_dt_ids); -#endif - -/* Detect MAC & PHY and perform ethernet interface initialization */ -static int __init at91ether_probe(struct platform_device *pdev) -{ - struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev); - struct resource *regs; - struct net_device *dev; - struct phy_device *phydev; - struct macb *lp; - int res; - u32 reg; - const char *mac; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENOENT; - - dev = alloc_etherdev(sizeof(struct macb)); - if (!dev) - return -ENOMEM; - - lp = netdev_priv(dev); - lp->pdev = pdev; - lp->dev = dev; - spin_lock_init(&lp->lock); - - /* physical base address */ - dev->base_addr = regs->start; - lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); - if (!lp->regs) { - res = -ENOMEM; - goto err_free_dev; - } - - /* Clock */ - lp->pclk = devm_clk_get(&pdev->dev, "ether_clk"); - if (IS_ERR(lp->pclk)) { - res = PTR_ERR(lp->pclk); - goto err_free_dev; - } - clk_prepare_enable(lp->pclk); - - lp->hclk = ERR_PTR(-ENOENT); - lp->tx_clk = ERR_PTR(-ENOENT); - - /* Install the interrupt handler */ - dev->irq = platform_get_irq(pdev, 0); - res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev); - if (res) - goto err_disable_clock; - - dev->netdev_ops = &at91ether_netdev_ops; - dev->ethtool_ops = &macb_ethtool_ops; - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - mac = of_get_mac_address(pdev->dev.of_node); - if (mac) - memcpy(lp->dev->dev_addr, mac, ETH_ALEN); - else - macb_get_hwaddr(lp); - - res = of_get_phy_mode(pdev->dev.of_node); - if (res < 0) { - if (board_data && board_data->is_rmii) - lp->phy_interface = PHY_INTERFACE_MODE_RMII; - else - 
lp->phy_interface = PHY_INTERFACE_MODE_MII; - } else { - lp->phy_interface = res; - } - - macb_writel(lp, NCR, 0); - - reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); - if (lp->phy_interface == PHY_INTERFACE_MODE_RMII) - reg |= MACB_BIT(RM9200_RMII); - - macb_writel(lp, NCFGR, reg); - - /* Register the network interface */ - res = register_netdev(dev); - if (res) - goto err_disable_clock; - - res = macb_mii_init(lp); - if (res) - goto err_out_unregister_netdev; - - /* will be enabled in open() */ - netif_carrier_off(dev); - - phydev = lp->phy_dev; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), - phydev->irq); - - /* Display ethernet banner */ - netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n", - dev->base_addr, dev->irq, dev->dev_addr); - - return 0; - -err_out_unregister_netdev: - unregister_netdev(dev); -err_disable_clock: - clk_disable_unprepare(lp->pclk); -err_free_dev: - free_netdev(dev); - return res; -} - -static int at91ether_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(dev); - - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - - mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); - mdiobus_free(lp->mii_bus); - unregister_netdev(dev); - clk_disable_unprepare(lp->pclk); - free_netdev(dev); - - return 0; -} - -#ifdef CONFIG_PM -static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(net_dev); - - if (netif_running(net_dev)) { - netif_stop_queue(net_dev); - netif_device_detach(net_dev); - - clk_disable_unprepare(lp->pclk); - } - return 0; -} - -static int at91ether_resume(struct platform_device *pdev) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct macb *lp = netdev_priv(net_dev); - - if (netif_running(net_dev)) { - clk_prepare_enable(lp->pclk); - - netif_device_attach(net_dev); - netif_start_queue(net_dev); - } - return 0; -} -#else -#define at91ether_suspend NULL -#define at91ether_resume NULL -#endif - -static struct platform_driver at91ether_driver = { - .remove = at91ether_remove, - .suspend = at91ether_suspend, - .resume = at91ether_resume, - .driver = { - .name = "at91_ether", - .of_match_table = of_match_ptr(at91ether_dt_ids), - }, -}; - -module_platform_driver_probe(at91ether_driver, at91ether_probe); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); -MODULE_AUTHOR("Andrew Victor"); -MODULE_ALIAS("platform:at91_ether"); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..9f5387249f24 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -102,7 +102,7 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index) return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } -void macb_set_hwaddr(struct macb *bp) +static void macb_set_hwaddr(struct macb *bp) { u32 bottom; u16 top; @@ -120,9 +120,8 @@ void macb_set_hwaddr(struct macb *bp) macb_or_gem_writel(bp, SA4B, 0); macb_or_gem_writel(bp, SA4T, 0); } -EXPORT_SYMBOL_GPL(macb_set_hwaddr); -void macb_get_hwaddr(struct macb *bp) +static void macb_get_hwaddr(struct macb *bp) { struct macb_platform_data *pdata; u32 bottom; @@ -162,7 +161,6 @@ void macb_get_hwaddr(struct macb *bp) netdev_info(bp->dev, "invalid hw address, using random\n"); eth_hw_addr_random(bp->dev); } 
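The fallback at the end of macb_get_hwaddr() just above is a common pattern: accept a stored station address only if it passes is_valid_ether_addr(), otherwise log and generate a random locally administered MAC. A minimal sketch of that pattern, using only the standard etherdevice helpers; example_load_hwaddr is a hypothetical name for illustration, not a function in this driver:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical helper: keep a stored station address only when it is a
 * valid unicast MAC, otherwise fall back to a random locally
 * administered address, as macb_get_hwaddr() does above.
 */
static void example_load_hwaddr(struct net_device *dev, const u8 *stored)
{
	if (stored && is_valid_ether_addr(stored)) {
		memcpy(dev->dev_addr, stored, ETH_ALEN);
	} else {
		netdev_info(dev, "invalid hw address, using random\n");
		eth_hw_addr_random(dev);
	}
}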
-EXPORT_SYMBOL_GPL(macb_get_hwaddr); static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { @@ -213,6 +211,9 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) { long ferr, rate, rate_rounded; + if (!clk) + return; + switch (speed) { case SPEED_10: rate = 2500000; @@ -292,11 +293,13 @@ static void macb_handle_link_change(struct net_device *dev) spin_unlock_irqrestore(&bp->lock, flags); - if (!IS_ERR(bp->tx_clk)) - macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); - if (status_change) { if (phydev->link) { + /* Update the TX clock rate if and only if the link is + * up and there has been a link change. + */ + macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + netif_carrier_on(dev); netdev_info(dev, "link up (%d/%s)\n", phydev->speed, @@ -357,7 +360,7 @@ static int macb_mii_probe(struct net_device *dev) return 0; } -int macb_mii_init(struct macb *bp) +static int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; struct device_node *np; @@ -438,7 +441,6 @@ err_out_free_mdiobus: err_out: return err; } -EXPORT_SYMBOL_GPL(macb_mii_init); static void macb_update_stats(struct macb *bp) { @@ -449,7 +451,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); for(; p < end; p++, reg++) - *p += __raw_readl(reg); + *p += readl_relaxed(reg); } static int macb_halt_tx(struct macb *bp) @@ -1578,6 +1580,7 @@ static u32 macb_dbw(struct macb *bp) static void macb_configure_dma(struct macb *bp) { u32 dmacfg; + u32 tmp, ncr; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); @@ -1585,7 +1588,24 @@ static void macb_configure_dma(struct macb *bp) if (bp->dma_burst_length) dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); - dmacfg &= ~GEM_BIT(ENDIA); + dmacfg &= ~GEM_BIT(ENDIA_PKT); + + /* Find the CPU endianness by using the loopback bit of the net_ctrl + * register. Save it first. When the CPU is in big-endian mode we + * need to program swapped mode for management descriptor access. + */ + ncr = macb_readl(bp, NCR); + __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); + tmp = __raw_readl(bp->regs + MACB_NCR); + + if (tmp == MACB_BIT(LLB)) + dmacfg &= ~GEM_BIT(ENDIA_DESC); + else + dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU is big endian */ + + /* Restore net_ctrl */ + macb_writel(bp, NCR, ncr); + if (bp->dev->features & NETIF_F_HW_CSUM) dmacfg |= GEM_BIT(TXCOEN); else @@ -1723,7 +1743,7 @@ static void macb_sethashtable(struct net_device *dev) /* * Enable/Disable promiscuous and multicast modes.
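 * (Promiscuous mode is a single bit in NCFGR, while multicast
 * filtering goes through the hash table programmed by
 * macb_sethashtable() above.)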
*/ -void macb_set_rx_mode(struct net_device *dev) +static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; struct macb *bp = netdev_priv(dev); @@ -1764,7 +1784,6 @@ void macb_set_rx_mode(struct net_device *dev) macb_writel(bp, NCFGR, cfg); } -EXPORT_SYMBOL_GPL(macb_set_rx_mode); static int macb_open(struct net_device *dev) { @@ -1832,14 +1851,14 @@ static void gem_update_stats(struct macb *bp) for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; - u64 val = __raw_readl(bp->regs + offset); + u64 val = readl_relaxed(bp->regs + offset); bp->ethtool_stats[i] += val; *p += val; if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { /* Add GEM_OCTTXH, GEM_OCTRXH */ - val = __raw_readl(bp->regs + offset + 4); + val = readl_relaxed(bp->regs + offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; *(++p) += val; } @@ -1917,7 +1936,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) } } -struct net_device_stats *macb_get_stats(struct net_device *dev) +static struct net_device_stats *macb_get_stats(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct net_device_stats *nstat = &bp->stats; @@ -1937,12 +1956,12 @@ struct net_device_stats *macb_get_stats(struct net_device *dev) hwstat->rx_oversize_pkts + hwstat->rx_jabbers + hwstat->rx_undersize_pkts + - hwstat->sqe_test_errors + hwstat->rx_length_mismatch); nstat->tx_errors = (hwstat->tx_late_cols + hwstat->tx_excessive_cols + hwstat->tx_underruns + - hwstat->tx_carrier_errors); + hwstat->tx_carrier_errors + + hwstat->sqe_test_errors); nstat->collisions = (hwstat->tx_single_cols + hwstat->tx_multiple_cols + hwstat->tx_excessive_cols); @@ -1963,7 +1982,6 @@ struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } -EXPORT_SYMBOL_GPL(macb_get_stats); static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -2019,13 +2037,13 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); regs_buff[11] = macb_tx_dma(&bp->queues[0], head); + regs_buff[12] = macb_or_gem_readl(bp, USRIO); if (macb_is_gem(bp)) { - regs_buff[12] = gem_readl(bp, USRIO); regs_buff[13] = gem_readl(bp, DMACFG); } } -const struct ethtool_ops macb_ethtool_ops = { +static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, @@ -2033,7 +2051,6 @@ const struct ethtool_ops macb_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, }; -EXPORT_SYMBOL_GPL(macb_ethtool_ops); static const struct ethtool_ops gem_ethtool_ops = { .get_settings = macb_get_settings, @@ -2047,7 +2064,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_sset_count = gem_get_sset_count, }; -int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; @@ -2060,7 +2077,6 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, rq, cmd); } -EXPORT_SYMBOL_GPL(macb_ioctl); static int macb_set_features(struct net_device *netdev, netdev_features_t features) @@ -2112,63 +2128,20 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_set_features = macb_set_features, }; -#if defined(CONFIG_OF) -static struct macb_config pc302gem_config = { - .caps = MACB_CAPS_SG_DISABLED | 
MACB_CAPS_GIGABIT_MODE_AVAILABLE, - .dma_burst_length = 16, -}; - -static struct macb_config sama5d3_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, - .dma_burst_length = 16, -}; - -static struct macb_config sama5d4_config = { - .caps = 0, - .dma_burst_length = 4, -}; - -static const struct of_device_id macb_dt_ids[] = { - { .compatible = "cdns,at32ap7000-macb" }, - { .compatible = "cdns,at91sam9260-macb" }, - { .compatible = "cdns,macb" }, - { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, - { .compatible = "cdns,gem", .data = &pc302gem_config }, - { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, - { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, macb_dt_ids); -#endif - /* - * Configure peripheral capacities according to device tree + * Configure peripheral capabilities according to device tree * and integration options used */ -static void macb_configure_caps(struct macb *bp) +static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) { u32 dcfg; - const struct of_device_id *match; - const struct macb_config *config; - if (bp->pdev->dev.of_node) { - match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); - if (match && match->data) { - config = (const struct macb_config *)match->data; + if (dt_conf) + bp->caps = dt_conf->caps; - bp->caps = config->caps; - /* - * As we have access to the matching node, configure - * DMA burst length as well - */ - bp->dma_burst_length = config->dma_burst_length; - } - } - - if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2) + if (macb_is_gem_hw(bp->regs)) { bp->caps |= MACB_CAPS_MACB_IS_GEM; - if (macb_is_gem(bp)) { dcfg = gem_readl(bp, DCFG1); if (GEM_BFEXT(IRQCOR, dcfg) == 0) bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; @@ -2185,18 +2158,22 @@ static void macb_probe_queues(void __iomem *mem, unsigned int *num_queues) { unsigned int hw_q; - u32 mid; *queue_mask = 0x1; *num_queues = 1; - /* is it macb or gem ? */ - mid = __raw_readl(mem + MACB_MID); - if (MACB_BFEXT(IDNUM, mid) != 0x2) + /* is it macb or gem ? 
+ * + * We need to read directly from the hardware here because + * we are early in the probe process and don't have the + * MACB_CAPS_MACB_IS_GEM flag positioned + */ + if (!macb_is_gem_hw(mem)) return; /* bit 0 is never set but queue 0 always exists */ - *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff; + *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; + *queue_mask |= 0x1; for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) @@ -2204,95 +2181,73 @@ static void macb_probe_queues(void __iomem *mem, (*num_queues)++; } -static int macb_probe(struct platform_device *pdev) +static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk) { - struct macb_platform_data *pdata; - struct resource *regs; - struct net_device *dev; - struct macb *bp; - struct macb_queue *queue; - struct phy_device *phydev; - u32 config; - int err = -ENXIO; - const char *mac; - void __iomem *mem; - unsigned int hw_q, queue_mask, q, num_queues; - struct clk *pclk, *hclk, *tx_clk; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "no mmio resource defined\n"); - goto err_out; - } + int err; - pclk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(pclk)) { - err = PTR_ERR(pclk); + *pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(*pclk)) { + err = PTR_ERR(*pclk); dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); - goto err_out; + return err; } - hclk = devm_clk_get(&pdev->dev, "hclk"); - if (IS_ERR(hclk)) { - err = PTR_ERR(hclk); + *hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(*hclk)) { + err = PTR_ERR(*hclk); dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); - goto err_out; + return err; } - tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; - err = clk_prepare_enable(pclk); + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); - goto err_out; + return err; } - err = clk_prepare_enable(hclk); + err = clk_prepare_enable(*hclk); if (err) { dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); - goto err_out_disable_pclk; + goto err_disable_pclk; } - if (!IS_ERR(tx_clk)) { - err = clk_prepare_enable(tx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", - err); - goto err_out_disable_hclk; - } + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_hclk; } - err = -ENOMEM; - mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); - if (!mem) { - dev_err(&pdev->dev, "failed to map registers, aborting.\n"); - goto err_out_disable_clocks; - } + return 0; - macb_probe_queues(mem, &queue_mask, &num_queues); - dev = alloc_etherdev_mq(sizeof(*bp), num_queues); - if (!dev) - goto err_out_disable_clocks; +err_disable_hclk: + clk_disable_unprepare(*hclk); - SET_NETDEV_DEV(dev, &pdev->dev); +err_disable_pclk: + clk_disable_unprepare(*pclk); - bp = netdev_priv(dev); - bp->pdev = pdev; - bp->dev = dev; - bp->regs = mem; - bp->num_queues = num_queues; - bp->pclk = pclk; - bp->hclk = hclk; - bp->tx_clk = tx_clk; + return err; +} - spin_lock_init(&bp->lock); +static int macb_init(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + unsigned int hw_q, q; + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; + int err; + u32 val; /* set the queue register mapping once for all: queue0 has a special * register mapping but we don't 
want to test the queue index then * compute the corresponding register offset at run time. */ for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { - if (!(queue_mask & (1 << hw_q))) + if (!(bp->queue_mask & (1 << hw_q))) continue; queue = &bp->queues[q]; @@ -2319,27 +2274,21 @@ static int macb_probe(struct platform_device *pdev) */ queue->irq = platform_get_irq(pdev, q); err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, - 0, dev->name, queue); + IRQF_SHARED, dev->name, queue); if (err) { dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", queue->irq, err); - goto err_out_free_netdev; + return err; } INIT_WORK(&queue->tx_error_task, macb_tx_error_task); q++; } - dev->irq = bp->queues[0].irq; dev->netdev_ops = &macb_netdev_ops; netif_napi_add(dev, &bp->napi, macb_poll, 64); - dev->base_addr = regs->start; - - /* setup capacities */ - macb_configure_caps(bp); - /* setup appropriated routines according to adapter type */ if (macb_is_gem(bp)) { bp->max_tx_length = GEM_MAX_TX_LEN; @@ -2366,18 +2315,470 @@ static int macb_probe(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); + + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); + + macb_or_gem_writel(bp, USRIO, val); + /* Set MII management clock divider */ - config = macb_mdc_clk_div(bp); - config |= macb_dbw(bp); - macb_writel(bp, NCFGR, config); + val = macb_mdc_clk_div(bp); + val |= macb_dbw(bp); + macb_writel(bp, NCFGR, val); + + return 0; +} + +#if defined(CONFIG_OF) +/* 1518 rounded up */ +#define AT91ETHER_MAX_RBUFF_SZ 0x600 +/* max number of receive buffers */ +#define AT91ETHER_MAX_RX_DESCR 9 + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + dma_addr_t addr; + u32 ctl; + int i; + + lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, + (AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc)), + &lp->rx_ring_dma, GFP_KERNEL); + if (!lp->rx_ring) + return -ENOMEM; + + lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + AT91ETHER_MAX_RBUFF_SZ, + &lp->rx_buffers_dma, GFP_KERNEL); + if (!lp->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + return -ENOMEM; + } + + addr = lp->rx_buffers_dma; + for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { + lp->rx_ring[i].addr = addr; + lp->rx_ring[i].ctrl = 0; + addr += AT91ETHER_MAX_RBUFF_SZ; + } + + /* Set the Wrap bit on the last descriptor */ + lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); + + /* Reset buffer index */ + lp->rx_tail = 0; + + /* Program address of descriptor list in Rx Buffer Queue register */ + macb_writel(lp, RBQP, lp->rx_ring_dma); + + /* Enable Receive and Transmit */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + + return 0; +} + +/* Open the ethernet interface */ +static int at91ether_open(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + int ret; + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); + 
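+	/* NCR.CLRSTAT above zeroes the MAC's statistics registers, and
+	 * the station address is reprogrammed below before
+	 * at91ether_start() enables the receiver, so incoming frames are
+	 * matched against the current MAC from the first packet on.
+	 */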
+ macb_set_hwaddr(lp); + + ret = at91ether_start(dev); + if (ret) + return ret; + + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* schedule a link state check */ + phy_start(lp->phy_dev); + + netif_start_queue(dev); + + return 0; +} + +/* Close the interface */ +static int at91ether_close(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + netif_stop_queue(dev); + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, + lp->rx_buffers, lp->rx_buffers_dma); + lp->rx_buffers = NULL; + + return 0; +} + +/* Transmit packet */ +static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + netif_stop_queue(dev); + + /* Store packet information (to free when Tx completed) */ + lp->skb = skb; + lp->skb_length = skb->len; + lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, + DMA_TO_DEVICE); + + /* Set address of the data in the Transmit Address register */ + macb_writel(lp, TAR, lp->skb_physaddr); + /* Set length of the packet in the Transmit Control register */ + macb_writel(lp, TCR, skb->len); + + } else { + netdev_err(dev, "%s called, but device is busy!\n", __func__); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} - mac = of_get_mac_address(pdev->dev.of_node); +/* Extract received frame from buffer descriptors and send it to upper layers. + * (Called from interrupt context) + */ +static void at91ether_rx(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + unsigned char *p_recv; + struct sk_buff *skb; + unsigned int pktlen; + + while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { + p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; + pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); + skb = netdev_alloc_skb(dev, pktlen + 2); + if (skb) { + skb_reserve(skb, 2); + memcpy(skb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = eth_type_trans(skb, dev); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pktlen; + netif_rx(skb); + } else { + lp->stats.rx_dropped++; + } + + if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) + lp->stats.multicast++; + + /* reset ownership bit */ + lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); + + /* wrap after last buffer */ + if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) + lp->rx_tail = 0; + else + lp->rx_tail++; + } +} + +/* MAC interrupt handler */ +static irqreturn_t at91ether_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct macb *lp = netdev_priv(dev); + u32 intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + * It is automatically cleared once read.
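+ * Because the register is clear-on-read, every source reported in
+ * this snapshot must be handled during this one pass; a second
+ * read would already return zero.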
+ */ + intstatus = macb_readl(lp, ISR); + + /* Receive complete */ + if (intstatus & MACB_BIT(RCOMP)) + at91ether_rx(dev); + + /* Transmit complete */ + if (intstatus & MACB_BIT(TCOMP)) { + /* The TCOM bit is set even if the transmission failed */ + if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) + lp->stats.tx_errors++; + + if (lp->skb) { + dev_kfree_skb_irq(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, + lp->skb_length, DMA_TO_DEVICE); + lp->stats.tx_packets++; + lp->stats.tx_bytes += lp->skb_length; + } + netif_wake_queue(dev); + } + + /* Work-around for EMAC Errata section 41.3.1 */ + if (intstatus & MACB_BIT(RXUBR)) { + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); + macb_writel(lp, NCR, ctl | MACB_BIT(RE)); + } + + if (intstatus & MACB_BIT(ISR_ROVR)) + netdev_err(dev, "ROVR error\n"); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void at91ether_poll_controller(struct net_device *dev) +{ + unsigned long flags; + + local_irq_save(flags); + at91ether_interrupt(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = macb_get_stats, + .ndo_set_rx_mode = macb_set_rx_mode, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, +#endif +}; + +static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk) +{ + int err; + + *hclk = NULL; + *tx_clk = NULL; + + *pclk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(*pclk)) + return PTR_ERR(*pclk); + + err = clk_prepare_enable(*pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + return err; + } + + return 0; +} + +static int at91ether_init(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(dev); + int err; + u32 reg; + + dev->netdev_ops = &at91ether_netdev_ops; + dev->ethtool_ops = &macb_ethtool_ops; + + err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, + 0, dev->name, dev); + if (err) + return err; + + macb_writel(bp, NCR, 0); + + reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); + if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) + reg |= MACB_BIT(RM9200_RMII); + + macb_writel(bp, NCFGR, reg); + + return 0; +} + +static const struct macb_config at91sam9260_config = { + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config pc302gem_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config sama5d3_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config sama5d4_config = { + .caps = 0, + .dma_burst_length = 4, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config emac_config = { + .clk_init = at91ether_clk_init, + .init = at91ether_init, +}; + +static const struct of_device_id macb_dt_ids[] = { + { .compatible = 
"cdns,at32ap7000-macb" }, + { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, + { .compatible = "cdns,macb" }, + { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, + { .compatible = "cdns,gem", .data = &pc302gem_config }, + { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, + { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, + { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, + { .compatible = "cdns,emac", .data = &emac_config }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, macb_dt_ids); +#endif /* CONFIG_OF */ + +static int macb_probe(struct platform_device *pdev) +{ + int (*clk_init)(struct platform_device *, struct clk **, + struct clk **, struct clk **) + = macb_clk_init; + int (*init)(struct platform_device *) = macb_init; + struct device_node *np = pdev->dev.of_node; + const struct macb_config *macb_config = NULL; + struct clk *pclk, *hclk, *tx_clk; + unsigned int queue_mask, num_queues; + struct macb_platform_data *pdata; + struct phy_device *phydev; + struct net_device *dev; + struct resource *regs; + void __iomem *mem; + const char *mac; + struct macb *bp; + int err; + + if (np) { + const struct of_device_id *match; + + match = of_match_node(macb_dt_ids, np); + if (match && match->data) { + macb_config = match->data; + clk_init = macb_config->clk_init; + init = macb_config->init; + } + } + + err = clk_init(pdev, &pclk, &hclk, &tx_clk); + if (err) + return err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mem = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mem)) { + err = PTR_ERR(mem); + goto err_disable_clocks; + } + + macb_probe_queues(mem, &queue_mask, &num_queues); + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); + if (!dev) { + err = -ENOMEM; + goto err_disable_clocks; + } + + dev->base_addr = regs->start; + + SET_NETDEV_DEV(dev, &pdev->dev); + + bp = netdev_priv(dev); + bp->pdev = pdev; + bp->dev = dev; + bp->regs = mem; + bp->num_queues = num_queues; + bp->queue_mask = queue_mask; + if (macb_config) + bp->dma_burst_length = macb_config->dma_burst_length; + bp->pclk = pclk; + bp->hclk = hclk; + bp->tx_clk = tx_clk; + spin_lock_init(&bp->lock); + + /* setup capabilities */ + macb_configure_caps(bp, macb_config); + + platform_set_drvdata(pdev, dev); + + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + err = dev->irq; + goto err_disable_clocks; + } + + mac = of_get_mac_address(np); if (mac) memcpy(bp->dev->dev_addr, mac, ETH_ALEN); else macb_get_hwaddr(bp); - err = of_get_phy_mode(pdev->dev.of_node); + err = of_get_phy_mode(np); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->is_rmii) @@ -2388,34 +2789,21 @@ static int macb_probe(struct platform_device *pdev) bp->phy_interface = err; } - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) - macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); - else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) -#if defined(CONFIG_ARCH_AT91) - macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | - MACB_BIT(CLKEN))); -#else - macb_or_gem_writel(bp, USRIO, 0); -#endif - else -#if defined(CONFIG_ARCH_AT91) - macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); -#else - macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); -#endif + /* IP specific init */ + err = init(pdev); + if (err) + goto err_out_free_netdev; err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_free_netdev; + goto err_out_unregister_netdev; } err = macb_mii_init(bp); if (err) goto 
err_out_unregister_netdev; - platform_set_drvdata(pdev, dev); - netif_carrier_off(dev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", @@ -2430,16 +2818,15 @@ static int macb_probe(struct platform_device *pdev) err_out_unregister_netdev: unregister_netdev(dev); + err_out_free_netdev: free_netdev(dev); -err_out_disable_clocks: - if (!IS_ERR(tx_clk)) - clk_disable_unprepare(tx_clk); -err_out_disable_hclk: + +err_disable_clocks: + clk_disable_unprepare(tx_clk); clk_disable_unprepare(hclk); -err_out_disable_pclk: clk_disable_unprepare(pclk); -err_out: + return err; } @@ -2458,8 +2845,7 @@ static int macb_remove(struct platform_device *pdev) kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); unregister_netdev(dev); - if (!IS_ERR(bp->tx_clk)) - clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); free_netdev(dev); @@ -2477,8 +2863,7 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); netif_device_detach(netdev); - if (!IS_ERR(bp->tx_clk)) - clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -2493,8 +2878,7 @@ static int __maybe_unused macb_resume(struct device *dev) clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); - if (!IS_ERR(bp->tx_clk)) - clk_prepare_enable(bp->tx_clk); + clk_prepare_enable(bp->tx_clk); netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..eb7d76f7bf6a 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -11,7 +11,7 @@ #define _MACB_H #define MACB_GREGS_NBR 16 -#define MACB_GREGS_VERSION 1 +#define MACB_GREGS_VERSION 2 #define MACB_MAX_QUEUES 8 /* MACB register offsets */ @@ -229,8 +229,10 @@ /* Bitfields in DMACFG. 
*/ #define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ #define GEM_FBLDO_SIZE 5 -#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */ -#define GEM_ENDIA_SIZE 1 +#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ +#define GEM_ENDIA_DESC_SIZE 1 +#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ +#define GEM_ENDIA_PKT_SIZE 1 #define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ #define GEM_RXBMS_SIZE 2 #define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ @@ -351,7 +353,7 @@ /* Bitfields in MID */ #define MACB_IDNUM_OFFSET 16 -#define MACB_IDNUM_SIZE 16 +#define MACB_IDNUM_SIZE 12 #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 @@ -389,6 +391,8 @@ /* Capability mask bits */ #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 +#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -423,17 +427,17 @@ /* Register access macros */ #define macb_readl(port,reg) \ - __raw_readl((port)->regs + MACB_##reg) + readl_relaxed((port)->regs + MACB_##reg) #define macb_writel(port,reg,value) \ - __raw_writel((value), (port)->regs + MACB_##reg) + writel_relaxed((value), (port)->regs + MACB_##reg) #define gem_readl(port, reg) \ - __raw_readl((port)->regs + GEM_##reg) + readl_relaxed((port)->regs + GEM_##reg) #define gem_writel(port, reg, value) \ - __raw_writel((value), (port)->regs + GEM_##reg) + writel_relaxed((value), (port)->regs + GEM_##reg) #define queue_readl(queue, reg) \ - __raw_readl((queue)->bp->regs + (queue)->reg) + readl_relaxed((queue)->bp->regs + (queue)->reg) #define queue_writel(queue, reg, value) \ - __raw_writel((value), (queue)->bp->regs + (queue)->reg) + writel_relaxed((value), (queue)->bp->regs + (queue)->reg) /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. 
For registers @@ -750,6 +754,9 @@ struct macb_or_gem_ops { struct macb_config { u32 caps; unsigned int dma_burst_length; + int (*clk_init)(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk); + int (*init)(struct platform_device *pdev); }; struct macb_queue { @@ -780,6 +787,7 @@ struct macb { size_t rx_buffer_size; unsigned int num_queues; + unsigned int queue_mask; struct macb_queue queues[MACB_MAX_QUEUES]; spinlock_t lock; @@ -820,18 +828,14 @@ struct macb { u64 ethtool_stats[GEM_STATS_LEN]; }; -extern const struct ethtool_ops macb_ethtool_ops; - -int macb_mii_init(struct macb *bp); -int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -struct net_device_stats *macb_get_stats(struct net_device *dev); -void macb_set_rx_mode(struct net_device *dev); -void macb_set_hwaddr(struct macb *bp); -void macb_get_hwaddr(struct macb *bp); - static inline bool macb_is_gem(struct macb *bp) { return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } +static inline bool macb_is_gem_hw(void __iomem *addr) +{ + return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2); +} + #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 47bfea24b9e1..63efa0dc45ba 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -47,9 +47,9 @@ #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ #define XGMAC_PMT 0x00000704 /* PMT Control and Status */ #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ -#define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */ +#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */ #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ -#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */ +#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */ #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ /* Hardware TX Statistics Counters */ @@ -153,7 +153,7 @@ #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define XGMAC_FLOW_CTRL_PT_SHIFT 16 #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ -#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */ +#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */ #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ @@ -254,18 +254,18 @@ /* XGMAC Operation Mode Register */ #define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ #define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ -#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */ +#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */ #define XGMAC_OMR_TTC_MASK 0x00030000 -#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */ -#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */ -#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */ -#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */ +#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */ +#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */ +#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */ +#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */ #define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ #define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ #define XGMAC_OMR_DT 
0x00000040 /* Drop TCP/IP csum Errors */ #define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ -#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */ -#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ +#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */ +#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */ /* XGMAC HW Features Register */ #define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index ac6473f75eb9..7daa088a9bb7 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -97,6 +97,17 @@ config CHELSIO_T4_DCB If unsure, say N. +config CHELSIO_T4_FCOE + bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards" + default n + depends on CHELSIO_T4 && CHELSIO_T4_DCB && FCOE + ---help--- + Enable FCoE offload features. + Say Y here if you want to enable Fibre Channel over Ethernet (FCoE) support + in the driver. + + If unsure, say N. + config CHELSIO_T4VF tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 186566bfdbc8..f5f1b0b51ebd 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -354,7 +354,7 @@ static void set_msglevel(struct net_device *dev, u32 val) adapter->msg_enable = val; } -static char stats_strings[][ETH_GSTRING_LEN] = { +static const char stats_strings[][ETH_GSTRING_LEN] = { "TxOctetsOK", "TxOctetsBad", "TxUnicastFramesOK", diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index db76f7040455..b96e4bfcac41 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1537,7 +1537,7 @@ static void set_msglevel(struct net_device *dev, u32 val) adapter->msg_enable = val; } -static char stats_strings[][ETH_GSTRING_LEN] = { +static const char stats_strings[][ETH_GSTRING_LEN] = { "TxOctetsOK ", "TxFramesOK ", "TxMulticastFramesOK", diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index d6aa602f168d..e4b5b057f417 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -422,7 +422,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); - wmb(); + dma_wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; @@ -433,7 +433,7 @@ static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, { d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); - wmb(); + dma_wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; @@ -579,7 +579,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, q->sdesc[q->pidx] = q->sdesc[idx]; to->addr_lo = from->addr_lo; /* already big endian */ to->addr_hi = from->addr_hi; /* likewise */ - wmb(); + dma_wmb(); to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); @@ -1068,7 +1068,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, sd->eop = 1; wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | V_WR_SGLSFLT(flits)) | wr_hi; - wmb(); + dma_wmb(); 
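+			/* dma_wmb() is sufficient here: it orders the
+			 * descriptor stores above against the wr_lo write
+			 * below as seen by the DMA engine, without wmb()'s
+			 * additional ordering against MMIO, which this
+			 * coherent-memory path does not need.
+			 */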
wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(gen)) | wr_lo; wr_gen2(d, gen); @@ -1114,7 +1114,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, } sd->eop = 1; wrp->wr_hi |= htonl(F_WR_EOP); - wmb(); + dma_wmb(); wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; wr_gen2((struct tx_desc *)wp, ogen); WARN_ON(ndesc != 0); @@ -1184,7 +1184,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | F_WR_SOP | F_WR_EOP | compl); - wmb(); + dma_wmb(); cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | V_WR_TID(q->token)); wr_gen2(d, gen); @@ -1342,7 +1342,7 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | V_WR_BCNTLFLT(len & 7)); - wmb(); + dma_wmb(); to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8)); wr_gen2(d, gen); @@ -2271,7 +2271,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs, u32 len, flags; __be32 rss_hi, rss_lo; - rmb(); + dma_rmb(); eth = r->rss_hdr.opcode == CPL_RX_PKT; rss_hi = *(const __be32 *)r; rss_lo = r->rss_hdr.rss_hash_val; @@ -2488,7 +2488,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, } if (!is_new_response(r, q)) break; - rmb(); + dma_rmb(); } while (is_pure_response(r)); if (sleeping) @@ -2523,7 +2523,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) if (!is_new_response(r, q)) return -1; - rmb(); + dma_rmb(); if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 184a8d545ac4..a22768c94200 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -840,7 +840,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's - * natural endianess. + * natural endianness. */ static int t3_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index ae50cd72358c..ace0ab98d0f1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o +cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 9062a8434246..c308429dd9c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key) } static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, - int addr_len) + u8 v6) { - return addr_len == 4 ? 
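The switch from a raw (addr, addr_len) pair to a sockaddr_in/sockaddr_in6 union as the CLIP hash key (completed in the clip_tbl.h hunk below) is what lets clip_tbl_show() collapse its two sprintf() branches into a single %pISc, since that specifier dispatches on sa_family at print time. A sketch of the behaviour, with an illustrative address:

        struct sockaddr_in sin = {
                .sin_family = AF_INET,
                .sin_addr   = { .s_addr = htonl(0xc0a80001) },  /* 192.168.0.1 */
        };
        char ip[60];

        /* The 'c' flag compresses IPv6 output; IPv4 prints as a dotted quad */
        sprintf(ip, "%pISc", &sin);     /* ip == "192.168.0.1" */

Given a sockaddr_in6 with sin6_family == AF_INET6 instead, the same format string would have produced the compressed IPv6 form.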
ipv4_clip_hash(ctbl, addr) : - ipv6_clip_hash(ctbl, addr); + return v6 ? ipv6_clip_hash(ctbl, addr) : + ipv4_clip_hash(ctbl, addr); } static int clip6_get_mbox(const struct net_device *dev, @@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - int ret = 0; + int ret = -1; if (!ctbl) return 0; - if (v6) - addr_len = 16; - else - addr_len = 4; - - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) spin_lock_init(&ce->lock); atomic_set(&ce->refcnt, 0); atomic_dec(&ctbl->nfree); - ce->addr_len = addr_len; - memcpy(ce->addr, lip, addr_len); list_add_tail(&ce->list, &ctbl->hash_list[hash]); if (v6) { + ce->addr6.sin6_family = AF_INET6; + memcpy(ce->addr6.sin6_addr.s6_addr, + lip, sizeof(struct in6_addr)); ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); return ret; } + } else { + ce->addr.sin_family = AF_INET; + memcpy((char *)(&ce->addr.sin_addr), lip, + sizeof(struct in_addr)); } } else { write_unlock_bh(&ctbl->lock); @@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - - if (v6) - addr_len = 16; - else - addr_len = 4; + int ret = -1; - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) for (i = 0 ; i < ctbl->clipt_size; ++i) { list_for_each_entry(ce, &ctbl->hash_list[i], list) { ip[0] = '\0'; - if (ce->addr_len == 16) - sprintf(ip, "%pI6c", ce->addr); - else - sprintf(ip, "%pI4c", ce->addr); + sprintf(ip, "%pISc", &ce->addr); seq_printf(seq, "%-25s %u\n", ip, atomic_read(&ce->refcnt)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 2eaba0161cf8..35eb43c6bcbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -14,8 +14,10 @@ struct clip_entry { spinlock_t lock; /* Hold while modifying clip reference */ atomic_t refcnt; struct list_head list; - u32 addr[4]; - int addr_len; + union { + struct sockaddr_in addr; + struct sockaddr_in6 addr6; + }; }; struct clip_tbl { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 
d6cda17efe6e..524d11098c56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -60,6 +60,11 @@ enum { }; enum { + T4_REGMAP_SIZE = (160 * 1024), + T5_REGMAP_SIZE = (332 * 1024), +}; + +enum { MEM_EDC0, MEM_EDC1, MEM_MC, @@ -369,15 +374,24 @@ enum { MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ - MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */ + MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */ }; enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ - MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2 - + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES, MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, }; @@ -387,6 +401,10 @@ struct sge_rspq; #include "cxgb4_dcb.h" +#ifdef CONFIG_CHELSIO_T4_FCOE +#include "cxgb4_fcoe.h" +#endif /* CONFIG_CHELSIO_T4_FCOE */ + struct port_info { struct adapter *adapter; u16 viid; @@ -406,6 +424,9 @@ struct port_info { #ifdef CONFIG_CHELSIO_T4_DCB struct port_dcb_info dcb; /* Data Center Bridging support */ #endif +#ifdef CONFIG_CHELSIO_T4_FCOE + struct cxgb_fcoe fcoe; +#endif /* CONFIG_CHELSIO_T4_FCOE */ }; struct dentry; @@ -599,8 +620,8 @@ struct sge { u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ u16 ofld_rxq[MAX_OFLD_QSETS]; - u16 rdma_rxq[NCHAN]; - u16 rdma_ciq[NCHAN]; + u16 rdma_rxq[MAX_RDMA_QUEUES]; + u16 rdma_ciq[MAX_RDMA_CIQS]; u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; u32 fl_pg_order; /* large page allocation size */ @@ -616,11 +637,13 @@ struct sge { unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ unsigned int egr_start; + unsigned int egr_sz; unsigned int ingr_start; - void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ - struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ - DECLARE_BITMAP(starving_fl, MAX_EGRQ); - DECLARE_BITMAP(txq_maperr, MAX_EGRQ); + unsigned int ingr_sz; + void **egr_map; /* qid->queue egress queue map */ + struct sge_rspq **ingr_map; /* qid->queue ingress queue map */ + unsigned long *starving_fl; + unsigned long *txq_maperr; struct timer_list rx_timer; /* refills starving FLs */ struct timer_list tx_timer; /* checks Tx queues */ }; @@ -993,6 +1016,30 @@ static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) } #endif /* CONFIG_NET_RX_BUSY_POLL */ +/* Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + * - bits 16..23: register dump version + */ +static inline unsigned int mk_adap_vers(struct adapter *ap) +{ + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); +} + +/* Return a queue's interrupt hold-off time in us. 0 means no timer. */ +static inline unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? 
adap->sge.timer_val[idx] : 0; +} + +/* driver version & name used for ethtool_drvinfo */ +extern char cxgb4_driver_name[]; +extern const char cxgb4_driver_version[]; + void t4_os_portmod_changed(const struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); @@ -1022,6 +1069,10 @@ int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); int cxgb_busy_poll(struct napi_struct *napi); +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +void cxgb4_set_ethtool_ops(struct net_device *netdev); +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ @@ -1103,13 +1154,16 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); #define T4_MEMORY_WRITE 0 #define T4_MEMORY_READ 1 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, - __be32 *buf, int dir); + void *buf, int dir); static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf) { return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0); } +unsigned int t4_get_regs_len(struct adapter *adapter); +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); + int t4_seeprom_wp(struct adapter *adapter, bool enable); int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_read_flash(struct adapter *adapter, unsigned int addr, @@ -1136,6 +1190,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qtimer_val(const struct adapter *adap, const struct sge_rspq *q); + +int t4_init_devlog_params(struct adapter *adapter); int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 78854ceb0870..371f75e782e5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -538,7 +538,7 @@ static ssize_t tp_la_write(struct file *file, const char __user *buf, char s[32]; unsigned long val; size_t size = min(sizeof(s) - 1, count); - struct adapter *adap = FILE_DATA(file)->i_private; + struct adapter *adap = file_inode(file)->i_private; if (copy_from_user(s, buf, size)) return -EFAULT; @@ -647,7 +647,7 @@ static int pm_stats_open(struct inode *inode, struct file *file) static ssize_t pm_stats_clear(struct file *file, const char __user *buf, size_t count, loff_t *pos) { - struct adapter *adap = FILE_DATA(file)->i_private; + struct adapter *adap = file_inode(file)->i_private; t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0); t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0); @@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) "0.9375" }; int i; - u16 incr[NMTUS][NCCTRL_WIN]; + u16 (*incr)[NCCTRL_WIN]; struct adapter *adap = seq->private; + incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL); + if (!incr) + return -ENOMEM; + t4_read_cong_tbl(adap, incr); for (i = 0; i < NCCTRL_WIN; ++i) { @@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) adap->params.a_wnd[i], dec_fac[adap->params.b_wnd[i]]); } + + kfree(incr); return 0; } @@ -999,7 +1005,7 @@ static ssize_t mbox_write(struct file *file, const char __user *buf, &data[7], &c) < 8 || c != '\n') return -EINVAL; - ino = FILE_DATA(file); 
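mk_adap_vers() and qtimer_val() become static inlines in cxgb4.h (the hunk above) so that the ethtool code split out later in this patch can keep using them. A worked example of the version word, assuming a T5 rev-1 adapter:

        unsigned int chip_ver = 0x5;            /* bits 0..9   */
        unsigned int chip_rel = 0x1;            /* bits 10..15 */
        unsigned int vers = chip_ver | (chip_rel << 10) | (1 << 16);
        /* vers == 0x10405: dump format 1, revision 1, chip version 5 */

get_regs() stores this in regs->version, which lets user space tell which register-dump layout it received.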
+ ino = file_inode(file); mbox = (uintptr_t)ino->i_private & 7; adap = ino->i_private - mbox; addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); @@ -1028,7 +1034,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = *ppos; - loff_t avail = FILE_DATA(file)->i_size; + loff_t avail = file_inode(file)->i_size; struct adapter *adap = file->private_data; if (pos < 0) @@ -1473,7 +1479,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf, int i, j; u32 key[10]; char s[100], *p; - struct adapter *adap = FILE_DATA(file)->i_private; + struct adapter *adap = file_inode(file)->i_private; if (count > sizeof(s) - 1) return -EINVAL; @@ -1769,6 +1775,8 @@ do { \ int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx); S("QType:", "RDMA-CPL"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); @@ -1788,6 +1796,8 @@ do { \ int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); S("QType:", "RDMA-CIQ"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); @@ -1941,12 +1951,6 @@ static const struct file_operations mem_debugfs_fops = { .llseek = default_llseek, }; -static void set_debugfs_file_size(struct dentry *de, loff_t size) -{ - if (!IS_ERR(de) && de->d_inode) - de->d_inode->i_size = size; -} - static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { @@ -2062,9 +2066,8 @@ int t4_setup_debugfs(struct adapter *adap) } } - de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap, - &flash_debugfs_fops); - set_debugfs_file_size(de, adap->params.sf_size); + de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, + &flash_debugfs_fops, adap->params.sf_size); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h index 8f418ba868bd..23f43a0f8950 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -37,8 +37,6 @@ #include <linux/export.h> -#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode) - #define DEFINE_SIMPLE_DEBUGFS_FILE(name) \ static int name##_open(struct inode *inode, struct file *file) \ { \ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c new file mode 100644 index 000000000000..10d82b51d7ef --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -0,0 +1,915 @@ +/* + * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include <linux/firmware.h> +#include <linux/mdio.h> + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" + +#define EEPROM_MAGIC 0x38E2F10C + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static const char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", + "GROpackets ", + "GROmerged ", + "WriteCoalSuccess ", + "WriteCoalFail ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static int get_regs_len(struct net_device *dev) +{ + struct adapter *adap = netdev2adap(dev); + + return t4_get_regs_len(adap); +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + u32 exprom_vers; + + strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); + strlcpy(info->version, cxgb4_driver_version, + sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); + + if (adapter->params.fw_vers) + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + if (!t4_get_exprom_version(adapter, &exprom_vers)) + snprintf(info->erom_version, sizeof(info->erom_version), + "%u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_G(exprom_vers), + FW_HDR_FW_VER_MINOR_G(exprom_vers), + FW_HDR_FW_VER_MICRO_G(exprom_vers), + FW_HDR_FW_VER_BUILD_G(exprom_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, 
sizeof(stats_strings)); +} + +/* port stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. + */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; + u64 gro_pkts; + u64 gro_merged; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, + struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + s->gro_pkts += rx->stats.lro_pkts; + s->gro_merged += rx->stats.lro_merged; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 val1, val2; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); + data += sizeof(struct queue_port_stats) / sizeof(u64); + if (!is_t4(adapter->params.chip)) { + t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7)); + val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A); + val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A); + *data = val1 - val2; + data++; + *data = val2; + data++; + } else { + memset(data, 0, 2 * sizeof(u64)); + *data += 2; + } +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct adapter *adap = netdev2adap(dev); + size_t buf_size; + + buf_size = t4_get_regs_len(adap); + regs->version = mk_adap_vers(adap); + t4_get_regs(adap, buf, buf_size); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + unsigned int val; + struct adapter *adap = netdev2adap(dev); + + if (state == ETHTOOL_ID_ACTIVE) + val = 0xffff; + else if (state == ETHTOOL_ID_INACTIVE) + val = 0; + else + return -EINVAL; + + return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); +} + +static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || + type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) { + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + } else if (type == FW_PORT_TYPE_BP_AP) { + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; + } else if (type == FW_PORT_TYPE_BP4_AP) { + v |= 
SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | + SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_FIBER_XFI || + type == FW_PORT_TYPE_FIBER_XAUI || + type == FW_PORT_TYPE_SFP || + type == FW_PORT_TYPE_QSFP_10G || + type == FW_PORT_TYPE_QSA) { + v |= SUPPORTED_FIBRE; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_BP40_BA || + type == FW_PORT_TYPE_QSFP) { + v |= SUPPORTED_40000baseSR4_Full; + v |= SUPPORTED_FIBRE; + } + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + if (caps & ADVERTISED_40000baseSR4_Full) + v |= FW_PORT_CAP_SPEED_40G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XFI || + p->port_type == FW_PORT_TYPE_BT_XAUI) { + cmd->port = PORT_TP; + } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || + p->port_type == FW_PORT_TYPE_FIBER_XAUI) { + cmd->port = PORT_FIBRE; + } else if (p->port_type == FW_PORT_TYPE_SFP || + p->port_type == FW_PORT_TYPE_QSFP_10G || + p->port_type == FW_PORT_TYPE_QSA || + p->port_type == FW_PORT_TYPE_QSFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_LR || + p->mod_type == FW_PORT_MOD_TYPE_SR || + p->mod_type == FW_PORT_MOD_TYPE_ER || + p->mod_type == FW_PORT_MOD_TYPE_LRM) + cmd->port = PORT_FIBRE; + else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + } else { + cmd->port = PORT_OTHER; + } + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + ethtool_cmd_speed_set(cmd, + netif_carrier_ok(dev) ? p->link_cfg.speed : 0); + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == 100) + return FW_PORT_CAP_SPEED_100M; + if (speed == 1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == 10000) + return FW_PORT_CAP_SPEED_10G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + u32 speed = ethtool_cmd_speed(cmd); + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* PHY offers a single speed. See if that's what's + * being requested. 
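A concrete round trip through from_fw_linkcaps() above, assuming a 10G SFP+ port whose firmware reports 1G and 10G plus autonegotiation:

        unsigned int fw_caps = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G |
                               FW_PORT_CAP_ANEG;
        unsigned int v = from_fw_linkcaps(FW_PORT_TYPE_SFP, fw_caps);
        /* v == SUPPORTED_FIBRE | SUPPORTED_1000baseT_Full |
         *      SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg
         */

to_fw_linkcaps() is the (lossy) inverse for the ADVERTISED_* mask: anything the firmware cannot express is simply dropped.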
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(speed); + + if (!(lc->supported & cap) || + (speed == 1000) || + (speed == 10000) || + (speed == 40000)) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +/** + * set_rx_intr_params - set a net device's RX interrupt hold-off parameters + * @dev: the network device + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Set the RX interrupt hold-off parameters for a network device.
+ */ +static int set_rx_intr_params(struct net_device *dev, + unsigned int us, unsigned int cnt) +{ + int i, err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) { + err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt); + if (err) + return err; + } + return 0; +} + +static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) +{ + int i; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) + q->rspq.adaptive_rx = adaptive_rx; + + return 0; +} + +static int get_adaptive_rx_setting(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + return q->rspq.adaptive_rx; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); + return set_rx_intr_params(dev, c->rx_coalesce_usecs, + c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); + return 0; +} + +/** + * eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return 31744 - fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + return -EINVAL; +} + +/* The next two routines implement eeprom read/write from physical addresses. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? 
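The eeprom_ptov() mapping above is easiest to check with numbers. For PF1 with the usual 1KB per-function area (so A = fn * sz = 1024), the illustrative translations are:

        eeprom_ptov(0x000, 1, 1024) == 0x7c00   /* [0..1K)    -> [31K..32K)   */
        eeprom_ptov(0x400, 1, 1024) == 0x7800   /* PF window  -> [31K-A..31K) */
        eeprom_ptov(0x800, 1, 1024) == 0x0000   /* the rest   -> [0..ES-A-1K) */

That is, each function's private window sits just below 31K and everything past it maps linearly from virtual 0.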
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (adapter->fn > 0) { + u32 start = 1024 + adapter->fn * EEPROMPFSIZE; + + if (aligned_offset < start || + aligned_offset + aligned_len > start + EEPROMPFSIZE) + return -EPERM; + } + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else { + buf = data; + } + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + unsigned int mbox = PCIE_FW_MASTER_M + 1; + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. + */ + if (adap->flags & FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, + "loaded firmware %s, reload cxgb4 driver\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else { + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + } + return err; +} + +static u32 get_rss_table_size(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return pi->rss_size; +} + +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) +{ + const struct port_info *pi = netdev_priv(dev); + unsigned int n = pi->rss_size; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; + while (n--) + p[n] = pi->rss[n]; + return 0; +} + +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, + const u8 hfunc) +{ + unsigned int i; + struct port_info *pi = netdev_priv(dev); + + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; + + for (i = 0; i < pi->rss_size; i++) + pi->rss[i] = p[i]; + if (pi->adapter->flags & FULL_INIT_DONE) + return cxgb4_write_rss(pi, pi->rss); + return 0; +} + +static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules) +{ + const struct port_info *pi = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + unsigned int v = pi->rss_mode; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + } + return 0; + } + case ETHTOOL_GRXRINGS: + info->data = pi->nqsets; + return 0; + } + return -EOPNOTSUPP; +} + +static const struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + 
.set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .set_phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .get_rxnfc = get_rxnfc, + .get_rxfh_indir_size = get_rss_table_size, + .get_rxfh = get_rss_table, + .set_rxfh = set_rss_table, + .flash_device = set_flash, +}; + +void cxgb4_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &cxgb_ethtool_ops; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c new file mode 100644 index 000000000000..6c8a62eefe51 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c @@ -0,0 +1,122 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifdef CONFIG_CHELSIO_T4_FCOE + +#include <scsi/fc/fc_fs.h> +#include <scsi/libfcoe.h> +#include "cxgb4.h" + +bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb) +{ + struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb); + u8 sof = fcoeh->fcoe_sof; + u8 eof = 0; + + if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) { + dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof); + return false; + } + + skb_copy_bits(skb, skb->len - 4, &eof, 1); + + if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) { + dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof); + return false; + } + + return true; +} + +/** + * cxgb_fcoe_enable - enable FCoE offload features + * @netdev: net device + * + * Returns 0 on success or -EINVAL on failure. 
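cxgb_fcoe_enable() and cxgb_fcoe_disable() are reached from the FCoE transport through net_device_ops; the hookup itself lands in the cxgb4_main.c portion of this series, presumably along these lines (the exact placement in the ops table is not shown in these hunks):

        #ifdef CONFIG_CHELSIO_T4_FCOE
                .ndo_fcoe_enable        = cxgb_fcoe_enable,
                .ndo_fcoe_disable       = cxgb_fcoe_disable,
        #endif

Note the is_t4() early exit in the function that follows: the offload depends on T5 features, which matches the "Chelsio T5 cards" wording of the new Kconfig entry.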
+ */ +int cxgb_fcoe_enable(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (is_t4(adap->params.chip)) + return -EINVAL; + + if (!(adap->flags & FULL_INIT_DONE)) + return -EINVAL; + + dev_info(adap->pdev_dev, "Enabling FCoE offload features\n"); + + netdev->features |= NETIF_F_FCOE_CRC; + netdev->vlan_features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FCOE_MTU; + netdev->vlan_features |= NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + fcoe->flags |= CXGB_FCOE_ENABLED; + + return 0; +} + +/** + * cxgb_fcoe_disable - disable FCoE offload + * @netdev: net device + * + * Returns 0 on success or -EINVAL on failure. + */ +int cxgb_fcoe_disable(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (!(fcoe->flags & CXGB_FCOE_ENABLED)) + return -EINVAL; + + dev_info(adap->pdev_dev, "Disabling FCoE offload features\n"); + + fcoe->flags &= ~CXGB_FCOE_ENABLED; + + netdev->features &= ~NETIF_F_FCOE_CRC; + netdev->vlan_features &= ~NETIF_F_FCOE_CRC; + netdev->features &= ~NETIF_F_FCOE_MTU; + netdev->vlan_features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + return 0; +} +#endif /* CONFIG_CHELSIO_T4_FCOE */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h new file mode 100644 index 000000000000..bf9258a56ac9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h @@ -0,0 +1,57 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_FCOE_H__ +#define __CXGB4_FCOE_H__ + +#ifdef CONFIG_CHELSIO_T4_FCOE + +#define CXGB_FCOE_TXPKT_CSUM_START 28 +#define CXGB_FCOE_TXPKT_CSUM_END 8 + +/* fcoe flags */ +enum { + CXGB_FCOE_ENABLED = (1 << 0), +}; + +struct cxgb_fcoe { + u8 flags; +}; + +int cxgb_fcoe_enable(struct net_device *); +int cxgb_fcoe_disable(struct net_device *); +bool cxgb_fcoe_sof_eof_supported(struct adapter *, struct sk_buff *); + +#endif /* CONFIG_CHELSIO_T4_FCOE */ +#endif /* __CXGB4_FCOE_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a22cf932ca35..803d91beec6f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -76,23 +76,15 @@ #include "clip_tbl.h" #include "l2t.h" +char cxgb4_driver_name[] = KBUILD_MODNAME; + #ifdef DRV_VERSION #undef DRV_VERSION #endif #define DRV_VERSION "2.0.0-ko" +const char cxgb4_driver_version[] = DRV_VERSION; #define DRV_DESC "Chelsio T4/T5 Network Driver" -enum { - MAX_TXQ_ENTRIES = 16384, - MAX_CTRL_TXQ_ENTRIES = 1024, - MAX_RSPQ_ENTRIES = 16384, - MAX_RX_BUFFERS = 16384, - MIN_TXQ_ENTRIES = 32, - MIN_CTRL_TXQ_ENTRIES = 32, - MIN_RSPQ_ENTRIES = 128, - MIN_FL_ENTRIES = 16 -}; - /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the * firmware command. The use of bit-field structure elements is purely to @@ -124,7 +116,7 @@ struct filter_entry { /* Macros needed to support the PCI Device ID Table ... */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ - static struct pci_device_id cxgb4_pci_tbl[] = { + static const struct pci_device_id cxgb4_pci_tbl[] = { #define CH_PCI_DEVICE_ID_FUNCTION 0x4 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is @@ -732,7 +724,8 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie) adap->swintr = 1; t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); } - t4_slow_intr_handler(adap); + if (adap->flags & MASTER_PF) + t4_slow_intr_handler(adap); return IRQ_HANDLED; } @@ -857,14 +850,14 @@ static void free_msix_queue_irqs(struct adapter *adap) } /** - * write_rss - write the RSS table for a given port + * cxgb4_write_rss - write the RSS table for a given port * @pi: the port * @queues: array of queue indices for RSS * * Sets up the portion of the HW RSS table for the port's VI to distribute * packets to the Rx queues in @queues. 
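write_rss() loses its static linkage and becomes cxgb4_write_rss() in the hunk that follows, so the new set_rss_table() ethtool handler can push an updated indirection table. A hypothetical caller, showing the expected shape of @queues (pi->rss_size entries, each an index into the port's Rx queue sets):

        static int demo_spread_rss(struct port_info *pi)
        {
                u16 *tbl;
                int i, err;

                tbl = kmalloc_array(pi->rss_size, sizeof(*tbl), GFP_KERNEL);
                if (!tbl)
                        return -ENOMEM;
                for (i = 0; i < pi->rss_size; i++)
                        tbl[i] = i % pi->nqsets;        /* round-robin over qsets */
                err = cxgb4_write_rss(pi, tbl);
                kfree(tbl);
                return err;
        }

In the driver itself the table comes from pi->rss, as setup_rss() below shows; demo_spread_rss() is illustrative only.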
*/ -static int write_rss(const struct port_info *pi, const u16 *queues) +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) { u16 *rss; int i, err; @@ -897,7 +890,7 @@ static int setup_rss(struct adapter *adap) for_each_port(adap, i) { const struct port_info *pi = adap2pinfo(adap, i); - err = write_rss(pi, pi->rss); + err = cxgb4_write_rss(pi, pi->rss); if (err) return err; } @@ -920,7 +913,7 @@ static void quiesce_rx(struct adapter *adap) { int i; - for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; if (q && q->handler) { @@ -934,6 +927,21 @@ static void quiesce_rx(struct adapter *adap) } } +/* Disable interrupt and napi handler */ +static void disable_interrupts(struct adapter *adap) +{ + if (adap->flags & FULL_INIT_DONE) { + t4_intr_disable(adap); + if (adap->flags & USING_MSIX) { + free_msix_queue_irqs(adap); + free_irq(adap->msix_info[0].vec, adap); + } else { + free_irq(adap->pdev->irq, adap); + } + quiesce_rx(adap); + } +} + /* * Enable NAPI scheduling and interrupt generation for all Rx queues. */ @@ -941,7 +949,7 @@ static void enable_rx(struct adapter *adap) { int i; - for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; if (!q) @@ -957,6 +965,28 @@ static void enable_rx(struct adapter *adap) } } +static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, + unsigned int nq, unsigned int per_chan, int msi_idx, + u16 *ids) +{ + int i, err; + + for (i = 0; i < nq; i++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + adap->port[i / per_chan], + msi_idx, q->fl.size ? &q->fl : NULL, + uldrx_handler); + if (err) + return err; + memset(&q->stats, 0, sizeof(q->stats)); + if (ids) + ids[i] = q->rspq.abs_id; + } + return 0; +} + /** * setup_sge_queues - configure SGE Tx/Rx/response queues * @adap: the adapter @@ -970,8 +1000,8 @@ static int setup_sge_queues(struct adapter *adap) int err, msi_idx, i, j; struct sge *s = &adap->sge; - bitmap_zero(s->starving_fl, MAX_EGRQ); - bitmap_zero(s->txq_maperr, MAX_EGRQ); + bitmap_zero(s->starving_fl, s->egr_sz); + bitmap_zero(s->txq_maperr, s->egr_sz); if (adap->flags & USING_MSIX) msi_idx = 1; /* vector 0 is for non-queue interrupts */ @@ -983,6 +1013,19 @@ static int setup_sge_queues(struct adapter *adap) msi_idx = -((int)s->intrq.abs_id + 1); } + /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, + * don't forget to update the following which need to be + * synchronized to and changes here. + * + * 1. The calculations of MAX_INGQ in cxgb4.h. + * + * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs + * to accommodate any new/deleted Ingress Queues + * which need MSI-X Vectors. + * + * 3. Update sge_qinfo_show() to include information on the + * new/deleted queues. + */ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], msi_idx, NULL, fwevtq_handler); if (err) { @@ -1018,51 +1061,27 @@ freeout: t4_free_sge_resources(adap); j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ for_each_ofldrxq(s, i) { - struct sge_ofld_rxq *q = &s->ofldrxq[i]; - struct net_device *dev = adap->port[i / j]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, - q->fl.size ? 
&q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->ofld_rxq[i] = q->rspq.abs_id; - err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], + adap->port[i / j], s->fw_evtq.cntxt_id); if (err) goto freeout; } - for_each_rdmarxq(s, i) { - struct sge_ofld_rxq *q = &s->rdmarxq[i]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, q->fl.size ? &q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->rdma_rxq[i] = q->rspq.abs_id; - } +#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ + if (err) \ + goto freeout; \ + if (msi_idx > 0) \ + msi_idx += nq; \ +} while (0) - for_each_rdmaciq(s, i) { - struct sge_ofld_rxq *q = &s->rdmaciq[i]; + ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq); + ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); + j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ + ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, q->fl.size ? &q->fl : NULL, - uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->rdma_ciq[i] = q->rspq.abs_id; - } +#undef ALLOC_OFLD_RXQS for_each_port(adap, i) { /* @@ -1121,6 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx) struct fw_filter_wr *fwr; unsigned int ftid; + skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); + if (!skb) + return -ENOMEM; + /* If the new filter requires loopback Destination MAC and/or VLAN * rewriting then we need to allocate a Layer 2 Table (L2T) entry for * the filter. @@ -1128,19 +1151,21 @@ static int set_filter_wr(struct adapter *adapter, int fidx) if (f->fs.newdmac || f->fs.newvlan) { /* allocate L2T entry for new filter */ f->l2t = t4_l2t_alloc_switching(adapter->l2t); - if (f->l2t == NULL) + if (f->l2t == NULL) { + kfree_skb(skb); return -EAGAIN; + } if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, f->fs.eport, f->fs.dmac)) { cxgb4_l2t_release(f->l2t); f->l2t = NULL; + kfree_skb(skb); return -ENOMEM; } } ftid = adapter->tids.ftid_base + fidx; - skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); memset(fwr, 0, sizeof(*fwr)); @@ -1238,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx) len = sizeof(*fwr); ftid = adapter->tids.ftid_base + fidx; - skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + fwr = (struct fw_filter_wr *)__skb_put(skb, len); t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); @@ -1273,6 +1301,10 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, txq = 0; } else { txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +#ifdef CONFIG_CHELSIO_T4_FCOE + if (skb->protocol == htons(ETH_P_FCOE)) + txq = skb->priority & 0x7; +#endif /* CONFIG_CHELSIO_T4_FCOE */ } return txq; } @@ -1297,1192 +1329,6 @@ static inline int is_offload(const struct adapter *adap) return adap->params.offload; } -/* - * Implementation of ethtool operations. 
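Two fixes ride along in the cxgb4_main.c hunks above: set_filter_wr() and del_filter_wr() drop __GFP_NOFAIL in favour of plain GFP_KERNEL with an -ENOMEM return, and set_filter_wr() now allocates its skb before claiming the L2T entry. That reordering is why the L2T error paths gain kfree_skb() calls; the shape is the usual one of completing every fallible allocation before committing side effects:

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;                 /* nothing to unwind yet */

        f->l2t = t4_l2t_alloc_switching(adapter->l2t);
        if (!f->l2t) {
                kfree_skb(skb);                 /* unwind the earlier allocation */
                return -EAGAIN;
        }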
- */ - -static u32 get_msglevel(struct net_device *dev) -{ - return netdev2adap(dev)->msg_enable; -} - -static void set_msglevel(struct net_device *dev, u32 val) -{ - netdev2adap(dev)->msg_enable = val; -} - -static char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK ", - "TxFramesOK ", - "TxBroadcastFrames ", - "TxMulticastFrames ", - "TxUnicastFrames ", - "TxErrorFrames ", - - "TxFrames64 ", - "TxFrames65To127 ", - "TxFrames128To255 ", - "TxFrames256To511 ", - "TxFrames512To1023 ", - "TxFrames1024To1518 ", - "TxFrames1519ToMax ", - - "TxFramesDropped ", - "TxPauseFrames ", - "TxPPP0Frames ", - "TxPPP1Frames ", - "TxPPP2Frames ", - "TxPPP3Frames ", - "TxPPP4Frames ", - "TxPPP5Frames ", - "TxPPP6Frames ", - "TxPPP7Frames ", - - "RxOctetsOK ", - "RxFramesOK ", - "RxBroadcastFrames ", - "RxMulticastFrames ", - "RxUnicastFrames ", - - "RxFramesTooLong ", - "RxJabberErrors ", - "RxFCSErrors ", - "RxLengthErrors ", - "RxSymbolErrors ", - "RxRuntFrames ", - - "RxFrames64 ", - "RxFrames65To127 ", - "RxFrames128To255 ", - "RxFrames256To511 ", - "RxFrames512To1023 ", - "RxFrames1024To1518 ", - "RxFrames1519ToMax ", - - "RxPauseFrames ", - "RxPPP0Frames ", - "RxPPP1Frames ", - "RxPPP2Frames ", - "RxPPP3Frames ", - "RxPPP4Frames ", - "RxPPP5Frames ", - "RxPPP6Frames ", - "RxPPP7Frames ", - - "RxBG0FramesDropped ", - "RxBG1FramesDropped ", - "RxBG2FramesDropped ", - "RxBG3FramesDropped ", - "RxBG0FramesTrunc ", - "RxBG1FramesTrunc ", - "RxBG2FramesTrunc ", - "RxBG3FramesTrunc ", - - "TSO ", - "TxCsumOffload ", - "RxCsumGood ", - "VLANextractions ", - "VLANinsertions ", - "GROpackets ", - "GROmerged ", - "WriteCoalSuccess ", - "WriteCoalFail ", -}; - -static int get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); - default: - return -EOPNOTSUPP; - } -} - -#define T4_REGMAP_SIZE (160 * 1024) -#define T5_REGMAP_SIZE (332 * 1024) - -static int get_regs_len(struct net_device *dev) -{ - struct adapter *adap = netdev2adap(dev); - if (is_t4(adap->params.chip)) - return T4_REGMAP_SIZE; - else - return T5_REGMAP_SIZE; -} - -static int get_eeprom_len(struct net_device *dev) -{ - return EEPROMSIZE; -} - -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct adapter *adapter = netdev2adap(dev); - u32 exprom_vers; - - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(adapter->pdev), - sizeof(info->bus_info)); - - if (adapter->params.fw_vers) - snprintf(info->fw_version, sizeof(info->fw_version), - "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); - - if (!t4_get_exprom_version(adapter, &exprom_vers)) - snprintf(info->erom_version, sizeof(info->erom_version), - "%u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_G(exprom_vers), - FW_HDR_FW_VER_MINOR_G(exprom_vers), - FW_HDR_FW_VER_MICRO_G(exprom_vers), - FW_HDR_FW_VER_BUILD_G(exprom_vers)); -} - -static void get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, stats_strings, sizeof(stats_strings)); -} - -/* - * port stats 
maintained per queue of the port. They should be in the same - * order as in stats_strings above. - */ -struct queue_port_stats { - u64 tso; - u64 tx_csum; - u64 rx_csum; - u64 vlan_ex; - u64 vlan_ins; - u64 gro_pkts; - u64 gro_merged; -}; - -static void collect_sge_port_stats(const struct adapter *adap, - const struct port_info *p, struct queue_port_stats *s) -{ - int i; - const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; - const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; - - memset(s, 0, sizeof(*s)); - for (i = 0; i < p->nqsets; i++, rx++, tx++) { - s->tso += tx->tso; - s->tx_csum += tx->tx_cso; - s->rx_csum += rx->stats.rx_cso; - s->vlan_ex += rx->stats.vlan_ex; - s->vlan_ins += tx->vlan_ins; - s->gro_pkts += rx->stats.lro_pkts; - s->gro_merged += rx->stats.lro_merged; - } -} - -static void get_stats(struct net_device *dev, struct ethtool_stats *stats, - u64 *data) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - u32 val1, val2; - - t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); - - data += sizeof(struct port_stats) / sizeof(u64); - collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); - data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->params.chip)) { - t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7)); - val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A); - val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A); - *data = val1 - val2; - data++; - *data = val2; - data++; - } else { - memset(data, 0, 2 * sizeof(u64)); - *data += 2; - } -} - -/* - * Return a version number to identify the type of adapter. The scheme is: - * - bits 0..9: chip version - * - bits 10..15: chip revision - * - bits 16..23: register dump version - */ -static inline unsigned int mk_adap_vers(const struct adapter *ap) -{ - return CHELSIO_CHIP_VERSION(ap->params.chip) | - (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); -} - -static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, - unsigned int end) -{ - u32 *p = buf + start; - - for ( ; start <= end; start += sizeof(u32)) - *p++ = t4_read_reg(ap, start); -} - -static void get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - static const unsigned int t4_reg_ranges[] = { - 0x1008, 0x1108, - 0x1180, 0x11b4, - 0x11fc, 0x123c, - 0x1300, 0x173c, - 0x1800, 0x18fc, - 0x3000, 0x30d8, - 0x30e0, 0x5924, - 0x5960, 0x59d4, - 0x5a00, 0x5af8, - 0x6000, 0x6098, - 0x6100, 0x6150, - 0x6200, 0x6208, - 0x6240, 0x6248, - 0x6280, 0x6338, - 0x6370, 0x638c, - 0x6400, 0x643c, - 0x6500, 0x6524, - 0x6a00, 0x6a38, - 0x6a60, 0x6a78, - 0x6b00, 0x6b84, - 0x6bf0, 0x6c84, - 0x6cf0, 0x6d84, - 0x6df0, 0x6e84, - 0x6ef0, 0x6f84, - 0x6ff0, 0x7084, - 0x70f0, 0x7184, - 0x71f0, 0x7284, - 0x72f0, 0x7384, - 0x73f0, 0x7450, - 0x7500, 0x7530, - 0x7600, 0x761c, - 0x7680, 0x76cc, - 0x7700, 0x7798, - 0x77c0, 0x77fc, - 0x7900, 0x79fc, - 0x7b00, 0x7c38, - 0x7d00, 0x7efc, - 0x8dc0, 0x8e1c, - 0x8e30, 0x8e78, - 0x8ea0, 0x8f6c, - 0x8fc0, 0x9074, - 0x90fc, 0x90fc, - 0x9400, 0x9458, - 0x9600, 0x96bc, - 0x9800, 0x9808, - 0x9820, 0x983c, - 0x9850, 0x9864, - 0x9c00, 0x9c6c, - 0x9c80, 0x9cec, - 0x9d00, 0x9d6c, - 0x9d80, 0x9dec, - 0x9e00, 0x9e6c, - 0x9e80, 0x9eec, - 0x9f00, 0x9f6c, - 0x9f80, 0x9fec, - 0xd004, 0xd03c, - 0xdfc0, 0xdfe0, - 0xe000, 0xea7c, - 0xf000, 0x11110, - 0x11118, 0x11190, - 0x19040, 0x1906c, - 0x19078, 0x19080, - 0x1908c, 0x19124, - 0x19150, 0x191b0, - 0x191d0, 0x191e8, - 0x19238, 0x1924c, - 0x193f8, 0x19474, - 0x19490, 
0x194f8, - 0x19800, 0x19f30, - 0x1a000, 0x1a06c, - 0x1a0b0, 0x1a120, - 0x1a128, 0x1a138, - 0x1a190, 0x1a1c4, - 0x1a1fc, 0x1a1fc, - 0x1e040, 0x1e04c, - 0x1e284, 0x1e28c, - 0x1e2c0, 0x1e2c0, - 0x1e2e0, 0x1e2e0, - 0x1e300, 0x1e384, - 0x1e3c0, 0x1e3c8, - 0x1e440, 0x1e44c, - 0x1e684, 0x1e68c, - 0x1e6c0, 0x1e6c0, - 0x1e6e0, 0x1e6e0, - 0x1e700, 0x1e784, - 0x1e7c0, 0x1e7c8, - 0x1e840, 0x1e84c, - 0x1ea84, 0x1ea8c, - 0x1eac0, 0x1eac0, - 0x1eae0, 0x1eae0, - 0x1eb00, 0x1eb84, - 0x1ebc0, 0x1ebc8, - 0x1ec40, 0x1ec4c, - 0x1ee84, 0x1ee8c, - 0x1eec0, 0x1eec0, - 0x1eee0, 0x1eee0, - 0x1ef00, 0x1ef84, - 0x1efc0, 0x1efc8, - 0x1f040, 0x1f04c, - 0x1f284, 0x1f28c, - 0x1f2c0, 0x1f2c0, - 0x1f2e0, 0x1f2e0, - 0x1f300, 0x1f384, - 0x1f3c0, 0x1f3c8, - 0x1f440, 0x1f44c, - 0x1f684, 0x1f68c, - 0x1f6c0, 0x1f6c0, - 0x1f6e0, 0x1f6e0, - 0x1f700, 0x1f784, - 0x1f7c0, 0x1f7c8, - 0x1f840, 0x1f84c, - 0x1fa84, 0x1fa8c, - 0x1fac0, 0x1fac0, - 0x1fae0, 0x1fae0, - 0x1fb00, 0x1fb84, - 0x1fbc0, 0x1fbc8, - 0x1fc40, 0x1fc4c, - 0x1fe84, 0x1fe8c, - 0x1fec0, 0x1fec0, - 0x1fee0, 0x1fee0, - 0x1ff00, 0x1ff84, - 0x1ffc0, 0x1ffc8, - 0x20000, 0x2002c, - 0x20100, 0x2013c, - 0x20190, 0x201c8, - 0x20200, 0x20318, - 0x20400, 0x20528, - 0x20540, 0x20614, - 0x21000, 0x21040, - 0x2104c, 0x21060, - 0x210c0, 0x210ec, - 0x21200, 0x21268, - 0x21270, 0x21284, - 0x212fc, 0x21388, - 0x21400, 0x21404, - 0x21500, 0x21518, - 0x2152c, 0x2153c, - 0x21550, 0x21554, - 0x21600, 0x21600, - 0x21608, 0x21628, - 0x21630, 0x2163c, - 0x21700, 0x2171c, - 0x21780, 0x2178c, - 0x21800, 0x21c38, - 0x21c80, 0x21d7c, - 0x21e00, 0x21e04, - 0x22000, 0x2202c, - 0x22100, 0x2213c, - 0x22190, 0x221c8, - 0x22200, 0x22318, - 0x22400, 0x22528, - 0x22540, 0x22614, - 0x23000, 0x23040, - 0x2304c, 0x23060, - 0x230c0, 0x230ec, - 0x23200, 0x23268, - 0x23270, 0x23284, - 0x232fc, 0x23388, - 0x23400, 0x23404, - 0x23500, 0x23518, - 0x2352c, 0x2353c, - 0x23550, 0x23554, - 0x23600, 0x23600, - 0x23608, 0x23628, - 0x23630, 0x2363c, - 0x23700, 0x2371c, - 0x23780, 0x2378c, - 0x23800, 0x23c38, - 0x23c80, 0x23d7c, - 0x23e00, 0x23e04, - 0x24000, 0x2402c, - 0x24100, 0x2413c, - 0x24190, 0x241c8, - 0x24200, 0x24318, - 0x24400, 0x24528, - 0x24540, 0x24614, - 0x25000, 0x25040, - 0x2504c, 0x25060, - 0x250c0, 0x250ec, - 0x25200, 0x25268, - 0x25270, 0x25284, - 0x252fc, 0x25388, - 0x25400, 0x25404, - 0x25500, 0x25518, - 0x2552c, 0x2553c, - 0x25550, 0x25554, - 0x25600, 0x25600, - 0x25608, 0x25628, - 0x25630, 0x2563c, - 0x25700, 0x2571c, - 0x25780, 0x2578c, - 0x25800, 0x25c38, - 0x25c80, 0x25d7c, - 0x25e00, 0x25e04, - 0x26000, 0x2602c, - 0x26100, 0x2613c, - 0x26190, 0x261c8, - 0x26200, 0x26318, - 0x26400, 0x26528, - 0x26540, 0x26614, - 0x27000, 0x27040, - 0x2704c, 0x27060, - 0x270c0, 0x270ec, - 0x27200, 0x27268, - 0x27270, 0x27284, - 0x272fc, 0x27388, - 0x27400, 0x27404, - 0x27500, 0x27518, - 0x2752c, 0x2753c, - 0x27550, 0x27554, - 0x27600, 0x27600, - 0x27608, 0x27628, - 0x27630, 0x2763c, - 0x27700, 0x2771c, - 0x27780, 0x2778c, - 0x27800, 0x27c38, - 0x27c80, 0x27d7c, - 0x27e00, 0x27e04 - }; - - static const unsigned int t5_reg_ranges[] = { - 0x1008, 0x1148, - 0x1180, 0x11b4, - 0x11fc, 0x123c, - 0x1280, 0x173c, - 0x1800, 0x18fc, - 0x3000, 0x3028, - 0x3060, 0x30d8, - 0x30e0, 0x30fc, - 0x3140, 0x357c, - 0x35a8, 0x35cc, - 0x35ec, 0x35ec, - 0x3600, 0x5624, - 0x56cc, 0x575c, - 0x580c, 0x5814, - 0x5890, 0x58bc, - 0x5940, 0x59dc, - 0x59fc, 0x5a18, - 0x5a60, 0x5a9c, - 0x5b9c, 0x5bfc, - 0x6000, 0x6040, - 0x6058, 0x614c, - 0x7700, 0x7798, - 0x77c0, 0x78fc, - 0x7b00, 0x7c54, - 0x7d00, 0x7efc, - 0x8dc0, 0x8de0, - 0x8df8, 0x8e84, - 
0x8ea0, 0x8f84, - 0x8fc0, 0x90f8, - 0x9400, 0x9470, - 0x9600, 0x96f4, - 0x9800, 0x9808, - 0x9820, 0x983c, - 0x9850, 0x9864, - 0x9c00, 0x9c6c, - 0x9c80, 0x9cec, - 0x9d00, 0x9d6c, - 0x9d80, 0x9dec, - 0x9e00, 0x9e6c, - 0x9e80, 0x9eec, - 0x9f00, 0x9f6c, - 0x9f80, 0xa020, - 0xd004, 0xd03c, - 0xdfc0, 0xdfe0, - 0xe000, 0x11088, - 0x1109c, 0x11110, - 0x11118, 0x1117c, - 0x11190, 0x11204, - 0x19040, 0x1906c, - 0x19078, 0x19080, - 0x1908c, 0x19124, - 0x19150, 0x191b0, - 0x191d0, 0x191e8, - 0x19238, 0x19290, - 0x193f8, 0x19474, - 0x19490, 0x194cc, - 0x194f0, 0x194f8, - 0x19c00, 0x19c60, - 0x19c94, 0x19e10, - 0x19e50, 0x19f34, - 0x19f40, 0x19f50, - 0x19f90, 0x19fe4, - 0x1a000, 0x1a06c, - 0x1a0b0, 0x1a120, - 0x1a128, 0x1a138, - 0x1a190, 0x1a1c4, - 0x1a1fc, 0x1a1fc, - 0x1e008, 0x1e00c, - 0x1e040, 0x1e04c, - 0x1e284, 0x1e290, - 0x1e2c0, 0x1e2c0, - 0x1e2e0, 0x1e2e0, - 0x1e300, 0x1e384, - 0x1e3c0, 0x1e3c8, - 0x1e408, 0x1e40c, - 0x1e440, 0x1e44c, - 0x1e684, 0x1e690, - 0x1e6c0, 0x1e6c0, - 0x1e6e0, 0x1e6e0, - 0x1e700, 0x1e784, - 0x1e7c0, 0x1e7c8, - 0x1e808, 0x1e80c, - 0x1e840, 0x1e84c, - 0x1ea84, 0x1ea90, - 0x1eac0, 0x1eac0, - 0x1eae0, 0x1eae0, - 0x1eb00, 0x1eb84, - 0x1ebc0, 0x1ebc8, - 0x1ec08, 0x1ec0c, - 0x1ec40, 0x1ec4c, - 0x1ee84, 0x1ee90, - 0x1eec0, 0x1eec0, - 0x1eee0, 0x1eee0, - 0x1ef00, 0x1ef84, - 0x1efc0, 0x1efc8, - 0x1f008, 0x1f00c, - 0x1f040, 0x1f04c, - 0x1f284, 0x1f290, - 0x1f2c0, 0x1f2c0, - 0x1f2e0, 0x1f2e0, - 0x1f300, 0x1f384, - 0x1f3c0, 0x1f3c8, - 0x1f408, 0x1f40c, - 0x1f440, 0x1f44c, - 0x1f684, 0x1f690, - 0x1f6c0, 0x1f6c0, - 0x1f6e0, 0x1f6e0, - 0x1f700, 0x1f784, - 0x1f7c0, 0x1f7c8, - 0x1f808, 0x1f80c, - 0x1f840, 0x1f84c, - 0x1fa84, 0x1fa90, - 0x1fac0, 0x1fac0, - 0x1fae0, 0x1fae0, - 0x1fb00, 0x1fb84, - 0x1fbc0, 0x1fbc8, - 0x1fc08, 0x1fc0c, - 0x1fc40, 0x1fc4c, - 0x1fe84, 0x1fe90, - 0x1fec0, 0x1fec0, - 0x1fee0, 0x1fee0, - 0x1ff00, 0x1ff84, - 0x1ffc0, 0x1ffc8, - 0x30000, 0x30030, - 0x30100, 0x30144, - 0x30190, 0x301d0, - 0x30200, 0x30318, - 0x30400, 0x3052c, - 0x30540, 0x3061c, - 0x30800, 0x30834, - 0x308c0, 0x30908, - 0x30910, 0x309ac, - 0x30a00, 0x30a04, - 0x30a0c, 0x30a2c, - 0x30a44, 0x30a50, - 0x30a74, 0x30c24, - 0x30d08, 0x30d14, - 0x30d1c, 0x30d20, - 0x30d3c, 0x30d50, - 0x31200, 0x3120c, - 0x31220, 0x31220, - 0x31240, 0x31240, - 0x31600, 0x31600, - 0x31608, 0x3160c, - 0x31a00, 0x31a1c, - 0x31e04, 0x31e20, - 0x31e38, 0x31e3c, - 0x31e80, 0x31e80, - 0x31e88, 0x31ea8, - 0x31eb0, 0x31eb4, - 0x31ec8, 0x31ed4, - 0x31fb8, 0x32004, - 0x32208, 0x3223c, - 0x32600, 0x32630, - 0x32a00, 0x32abc, - 0x32b00, 0x32b70, - 0x33000, 0x33048, - 0x33060, 0x3309c, - 0x330f0, 0x33148, - 0x33160, 0x3319c, - 0x331f0, 0x332e4, - 0x332f8, 0x333e4, - 0x333f8, 0x33448, - 0x33460, 0x3349c, - 0x334f0, 0x33548, - 0x33560, 0x3359c, - 0x335f0, 0x336e4, - 0x336f8, 0x337e4, - 0x337f8, 0x337fc, - 0x33814, 0x33814, - 0x3382c, 0x3382c, - 0x33880, 0x3388c, - 0x338e8, 0x338ec, - 0x33900, 0x33948, - 0x33960, 0x3399c, - 0x339f0, 0x33ae4, - 0x33af8, 0x33b10, - 0x33b28, 0x33b28, - 0x33b3c, 0x33b50, - 0x33bf0, 0x33c10, - 0x33c28, 0x33c28, - 0x33c3c, 0x33c50, - 0x33cf0, 0x33cfc, - 0x34000, 0x34030, - 0x34100, 0x34144, - 0x34190, 0x341d0, - 0x34200, 0x34318, - 0x34400, 0x3452c, - 0x34540, 0x3461c, - 0x34800, 0x34834, - 0x348c0, 0x34908, - 0x34910, 0x349ac, - 0x34a00, 0x34a04, - 0x34a0c, 0x34a2c, - 0x34a44, 0x34a50, - 0x34a74, 0x34c24, - 0x34d08, 0x34d14, - 0x34d1c, 0x34d20, - 0x34d3c, 0x34d50, - 0x35200, 0x3520c, - 0x35220, 0x35220, - 0x35240, 0x35240, - 0x35600, 0x35600, - 0x35608, 0x3560c, - 0x35a00, 0x35a1c, - 0x35e04, 0x35e20, - 0x35e38, 
0x35e3c, - 0x35e80, 0x35e80, - 0x35e88, 0x35ea8, - 0x35eb0, 0x35eb4, - 0x35ec8, 0x35ed4, - 0x35fb8, 0x36004, - 0x36208, 0x3623c, - 0x36600, 0x36630, - 0x36a00, 0x36abc, - 0x36b00, 0x36b70, - 0x37000, 0x37048, - 0x37060, 0x3709c, - 0x370f0, 0x37148, - 0x37160, 0x3719c, - 0x371f0, 0x372e4, - 0x372f8, 0x373e4, - 0x373f8, 0x37448, - 0x37460, 0x3749c, - 0x374f0, 0x37548, - 0x37560, 0x3759c, - 0x375f0, 0x376e4, - 0x376f8, 0x377e4, - 0x377f8, 0x377fc, - 0x37814, 0x37814, - 0x3782c, 0x3782c, - 0x37880, 0x3788c, - 0x378e8, 0x378ec, - 0x37900, 0x37948, - 0x37960, 0x3799c, - 0x379f0, 0x37ae4, - 0x37af8, 0x37b10, - 0x37b28, 0x37b28, - 0x37b3c, 0x37b50, - 0x37bf0, 0x37c10, - 0x37c28, 0x37c28, - 0x37c3c, 0x37c50, - 0x37cf0, 0x37cfc, - 0x38000, 0x38030, - 0x38100, 0x38144, - 0x38190, 0x381d0, - 0x38200, 0x38318, - 0x38400, 0x3852c, - 0x38540, 0x3861c, - 0x38800, 0x38834, - 0x388c0, 0x38908, - 0x38910, 0x389ac, - 0x38a00, 0x38a04, - 0x38a0c, 0x38a2c, - 0x38a44, 0x38a50, - 0x38a74, 0x38c24, - 0x38d08, 0x38d14, - 0x38d1c, 0x38d20, - 0x38d3c, 0x38d50, - 0x39200, 0x3920c, - 0x39220, 0x39220, - 0x39240, 0x39240, - 0x39600, 0x39600, - 0x39608, 0x3960c, - 0x39a00, 0x39a1c, - 0x39e04, 0x39e20, - 0x39e38, 0x39e3c, - 0x39e80, 0x39e80, - 0x39e88, 0x39ea8, - 0x39eb0, 0x39eb4, - 0x39ec8, 0x39ed4, - 0x39fb8, 0x3a004, - 0x3a208, 0x3a23c, - 0x3a600, 0x3a630, - 0x3aa00, 0x3aabc, - 0x3ab00, 0x3ab70, - 0x3b000, 0x3b048, - 0x3b060, 0x3b09c, - 0x3b0f0, 0x3b148, - 0x3b160, 0x3b19c, - 0x3b1f0, 0x3b2e4, - 0x3b2f8, 0x3b3e4, - 0x3b3f8, 0x3b448, - 0x3b460, 0x3b49c, - 0x3b4f0, 0x3b548, - 0x3b560, 0x3b59c, - 0x3b5f0, 0x3b6e4, - 0x3b6f8, 0x3b7e4, - 0x3b7f8, 0x3b7fc, - 0x3b814, 0x3b814, - 0x3b82c, 0x3b82c, - 0x3b880, 0x3b88c, - 0x3b8e8, 0x3b8ec, - 0x3b900, 0x3b948, - 0x3b960, 0x3b99c, - 0x3b9f0, 0x3bae4, - 0x3baf8, 0x3bb10, - 0x3bb28, 0x3bb28, - 0x3bb3c, 0x3bb50, - 0x3bbf0, 0x3bc10, - 0x3bc28, 0x3bc28, - 0x3bc3c, 0x3bc50, - 0x3bcf0, 0x3bcfc, - 0x3c000, 0x3c030, - 0x3c100, 0x3c144, - 0x3c190, 0x3c1d0, - 0x3c200, 0x3c318, - 0x3c400, 0x3c52c, - 0x3c540, 0x3c61c, - 0x3c800, 0x3c834, - 0x3c8c0, 0x3c908, - 0x3c910, 0x3c9ac, - 0x3ca00, 0x3ca04, - 0x3ca0c, 0x3ca2c, - 0x3ca44, 0x3ca50, - 0x3ca74, 0x3cc24, - 0x3cd08, 0x3cd14, - 0x3cd1c, 0x3cd20, - 0x3cd3c, 0x3cd50, - 0x3d200, 0x3d20c, - 0x3d220, 0x3d220, - 0x3d240, 0x3d240, - 0x3d600, 0x3d600, - 0x3d608, 0x3d60c, - 0x3da00, 0x3da1c, - 0x3de04, 0x3de20, - 0x3de38, 0x3de3c, - 0x3de80, 0x3de80, - 0x3de88, 0x3dea8, - 0x3deb0, 0x3deb4, - 0x3dec8, 0x3ded4, - 0x3dfb8, 0x3e004, - 0x3e208, 0x3e23c, - 0x3e600, 0x3e630, - 0x3ea00, 0x3eabc, - 0x3eb00, 0x3eb70, - 0x3f000, 0x3f048, - 0x3f060, 0x3f09c, - 0x3f0f0, 0x3f148, - 0x3f160, 0x3f19c, - 0x3f1f0, 0x3f2e4, - 0x3f2f8, 0x3f3e4, - 0x3f3f8, 0x3f448, - 0x3f460, 0x3f49c, - 0x3f4f0, 0x3f548, - 0x3f560, 0x3f59c, - 0x3f5f0, 0x3f6e4, - 0x3f6f8, 0x3f7e4, - 0x3f7f8, 0x3f7fc, - 0x3f814, 0x3f814, - 0x3f82c, 0x3f82c, - 0x3f880, 0x3f88c, - 0x3f8e8, 0x3f8ec, - 0x3f900, 0x3f948, - 0x3f960, 0x3f99c, - 0x3f9f0, 0x3fae4, - 0x3faf8, 0x3fb10, - 0x3fb28, 0x3fb28, - 0x3fb3c, 0x3fb50, - 0x3fbf0, 0x3fc10, - 0x3fc28, 0x3fc28, - 0x3fc3c, 0x3fc50, - 0x3fcf0, 0x3fcfc, - 0x40000, 0x4000c, - 0x40040, 0x40068, - 0x40080, 0x40144, - 0x40180, 0x4018c, - 0x40200, 0x40298, - 0x402ac, 0x4033c, - 0x403f8, 0x403fc, - 0x41304, 0x413c4, - 0x41400, 0x4141c, - 0x41480, 0x414d0, - 0x44000, 0x44078, - 0x440c0, 0x44278, - 0x442c0, 0x44478, - 0x444c0, 0x44678, - 0x446c0, 0x44878, - 0x448c0, 0x449fc, - 0x45000, 0x45068, - 0x45080, 0x45084, - 0x450a0, 0x450b0, - 0x45200, 0x45268, - 0x45280, 0x45284, - 
0x452a0, 0x452b0, - 0x460c0, 0x460e4, - 0x47000, 0x4708c, - 0x47200, 0x47250, - 0x47400, 0x47420, - 0x47600, 0x47618, - 0x47800, 0x47814, - 0x48000, 0x4800c, - 0x48040, 0x48068, - 0x48080, 0x48144, - 0x48180, 0x4818c, - 0x48200, 0x48298, - 0x482ac, 0x4833c, - 0x483f8, 0x483fc, - 0x49304, 0x493c4, - 0x49400, 0x4941c, - 0x49480, 0x494d0, - 0x4c000, 0x4c078, - 0x4c0c0, 0x4c278, - 0x4c2c0, 0x4c478, - 0x4c4c0, 0x4c678, - 0x4c6c0, 0x4c878, - 0x4c8c0, 0x4c9fc, - 0x4d000, 0x4d068, - 0x4d080, 0x4d084, - 0x4d0a0, 0x4d0b0, - 0x4d200, 0x4d268, - 0x4d280, 0x4d284, - 0x4d2a0, 0x4d2b0, - 0x4e0c0, 0x4e0e4, - 0x4f000, 0x4f08c, - 0x4f200, 0x4f250, - 0x4f400, 0x4f420, - 0x4f600, 0x4f618, - 0x4f800, 0x4f814, - 0x50000, 0x500cc, - 0x50400, 0x50400, - 0x50800, 0x508cc, - 0x50c00, 0x50c00, - 0x51000, 0x5101c, - 0x51300, 0x51308, - }; - - int i; - struct adapter *ap = netdev2adap(dev); - static const unsigned int *reg_ranges; - int arr_size = 0, buf_size = 0; - - if (is_t4(ap->params.chip)) { - reg_ranges = &t4_reg_ranges[0]; - arr_size = ARRAY_SIZE(t4_reg_ranges); - buf_size = T4_REGMAP_SIZE; - } else { - reg_ranges = &t5_reg_ranges[0]; - arr_size = ARRAY_SIZE(t5_reg_ranges); - buf_size = T5_REGMAP_SIZE; - } - - regs->version = mk_adap_vers(ap); - - memset(buf, 0, buf_size); - for (i = 0; i < arr_size; i += 2) - reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); -} - -static int restart_autoneg(struct net_device *dev) -{ - struct port_info *p = netdev_priv(dev); - - if (!netif_running(dev)) - return -EAGAIN; - if (p->link_cfg.autoneg != AUTONEG_ENABLE) - return -EINVAL; - t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); - return 0; -} - -static int identify_port(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - unsigned int val; - struct adapter *adap = netdev2adap(dev); - - if (state == ETHTOOL_ID_ACTIVE) - val = 0xffff; - else if (state == ETHTOOL_ID_INACTIVE) - val = 0; - else - return -EINVAL; - - return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); -} - -static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) -{ - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - else if (type == FW_PORT_TYPE_BP_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - else if (type == FW_PORT_TYPE_BP4_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || - type == FW_PORT_TYPE_SFP || - type == FW_PORT_TYPE_QSFP_10G || - type == FW_PORT_TYPE_QSA) { - v |= SUPPORTED_FIBRE; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_BP40_BA || - type == FW_PORT_TYPE_QSFP) { - v |= 
SUPPORTED_40000baseSR4_Full; - v |= SUPPORTED_FIBRE; - } - - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; -} - -static unsigned int to_fw_linkcaps(unsigned int caps) -{ - unsigned int v = 0; - - if (caps & ADVERTISED_100baseT_Full) - v |= FW_PORT_CAP_SPEED_100M; - if (caps & ADVERTISED_1000baseT_Full) - v |= FW_PORT_CAP_SPEED_1G; - if (caps & ADVERTISED_10000baseT_Full) - v |= FW_PORT_CAP_SPEED_10G; - if (caps & ADVERTISED_40000baseSR4_Full) - v |= FW_PORT_CAP_SPEED_40G; - return v; -} - -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) - cmd->port = PORT_TP; - else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) - cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_SFP || - p->port_type == FW_PORT_TYPE_QSFP_10G || - p->port_type == FW_PORT_TYPE_QSA || - p->port_type == FW_PORT_TYPE_QSFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_LR || - p->mod_type == FW_PORT_MOD_TYPE_SR || - p->mod_type == FW_PORT_MOD_TYPE_ER || - p->mod_type == FW_PORT_MOD_TYPE_LRM) - cmd->port = PORT_FIBRE; - else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; - else - cmd->port = PORT_OTHER; - } else - cmd->port = PORT_OTHER; - - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; - } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; - } - - cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); - cmd->advertising = from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static unsigned int speed_to_caps(int speed) -{ - if (speed == 100) - return FW_PORT_CAP_SPEED_100M; - if (speed == 1000) - return FW_PORT_CAP_SPEED_1G; - if (speed == 10000) - return FW_PORT_CAP_SPEED_10G; - if (speed == 40000) - return FW_PORT_CAP_SPEED_40G; - return 0; -} - -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - unsigned int cap; - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - u32 speed = ethtool_cmd_speed(cmd); - - if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ - return -EINVAL; - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - /* - * PHY offers a single speed. See if that's what's - * being requested. 
- */ - if (cmd->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_caps(speed))) - return 0; - return -EINVAL; - } - - if (cmd->autoneg == AUTONEG_DISABLE) { - cap = speed_to_caps(speed); - - if (!(lc->supported & cap) || - (speed == 1000) || - (speed == 10000) || - (speed == 40000)) - return -EINVAL; - lc->requested_speed = cap; - lc->advertising = 0; - } else { - cap = to_fw_linkcaps(cmd->advertising); - if (!(lc->supported & cap)) - return -EINVAL; - lc->requested_speed = 0; - lc->advertising = cap | FW_PORT_CAP_ANEG; - } - lc->autoneg = cmd->autoneg; - - if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, - lc); - return 0; -} - -static void get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - - epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; -} - -static int set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - - if (epause->autoneg == AUTONEG_DISABLE) - lc->requested_fc = 0; - else if (lc->supported & FW_PORT_CAP_ANEG) - lc->requested_fc = PAUSE_AUTONEG; - else - return -EINVAL; - - if (epause->rx_pause) - lc->requested_fc |= PAUSE_RX; - if (epause->tx_pause) - lc->requested_fc |= PAUSE_TX; - if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, - lc); - return 0; -} - -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - const struct port_info *pi = netdev_priv(dev); - const struct sge *s = &pi->adapter->sge; - - e->rx_max_pending = MAX_RX_BUFFERS; - e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; - e->rx_jumbo_max_pending = 0; - e->tx_max_pending = MAX_TXQ_ENTRIES; - - e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; - e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; - e->rx_jumbo_pending = 0; - e->tx_pending = s->ethtxq[pi->first_qset].q.size; -} - -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - int i; - const struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct sge *s = &adapter->sge; - - if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || - e->tx_pending > MAX_TXQ_ENTRIES || - e->rx_mini_pending > MAX_RSPQ_ENTRIES || - e->rx_mini_pending < MIN_RSPQ_ENTRIES || - e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) - return -EINVAL; - - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - - for (i = 0; i < pi->nqsets; ++i) { - s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; - s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; - s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; - } - return 0; -} - static int closest_timer(const struct sge *s, int time) { int i, delta, match = 0, min_delta = INT_MAX; @@ -2515,19 +1361,8 @@ static int closest_thres(const struct sge *s, int thres) return match; } -/* - * Return a queue's interrupt hold-off time in us. 0 means no timer. - */ -unsigned int qtimer_val(const struct adapter *adap, - const struct sge_rspq *q) -{ - unsigned int idx = q->intr_params >> 1; - - return idx < SGE_NTIMERS ? 
adap->sge.timer_val[idx] : 0; -} - /** - * set_rspq_intr_params - set a queue's interrupt holdoff parameters + * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters * @q: the Rx queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to disable counter @@ -2535,8 +1370,8 @@ unsigned int qtimer_val(const struct adapter *adap, * Sets an Rx queue's interrupt hold-off time and packet count. At least * one of the two needs to be enabled for the queue to generate interrupts. */ -static int set_rspq_intr_params(struct sge_rspq *q, - unsigned int us, unsigned int cnt) +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, + unsigned int us, unsigned int cnt) { struct adapter *adap = q->adap; @@ -2567,259 +1402,6 @@ static int set_rspq_intr_params(struct sge_rspq *q, return 0; } -/** - * set_rx_intr_params - set a net devices's RX interrupt holdoff parameters - * @dev: the network device - * @us: the hold-off time in us, or 0 to disable timer - * @cnt: the hold-off packet count, or 0 to disable counter - * - * Set the RX interrupt hold-off parameters for a network device. - */ -static int set_rx_intr_params(struct net_device *dev, - unsigned int us, unsigned int cnt) -{ - int i, err; - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; - - for (i = 0; i < pi->nqsets; i++, q++) { - err = set_rspq_intr_params(&q->rspq, us, cnt); - if (err) - return err; - } - return 0; -} - -static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) -{ - int i; - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; - - for (i = 0; i < pi->nqsets; i++, q++) - q->rspq.adaptive_rx = adaptive_rx; - - return 0; -} - -static int get_adaptive_rx_setting(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; - - return q->rspq.adaptive_rx; -} - -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); - return set_rx_intr_params(dev, c->rx_coalesce_usecs, - c->rx_max_coalesced_frames); -} - -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - const struct port_info *pi = netdev_priv(dev); - const struct adapter *adap = pi->adapter; - const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; - - c->rx_coalesce_usecs = qtimer_val(adap, rq); - c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? - adap->sge.counter_val[rq->pktcnt_idx] : 0; - c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); - return 0; -} - -/** - * eeprom_ptov - translate a physical EEPROM address to virtual - * @phys_addr: the physical EEPROM address - * @fn: the PCI function number - * @sz: size of function-specific area - * - * Translate a physical EEPROM address to virtual. The first 1K is - * accessed through virtual addresses starting at 31K, the rest is - * accessed through virtual addresses starting at 0. - * - * The mapping is as follows: - * [0..1K) -> [31K..32K) - * [1K..1K+A) -> [31K-A..31K) - * [1K+A..ES) -> [0..ES-A-1K) - * - * where A = @fn * @sz, and ES = EEPROM size.
- */ -static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) -{ - fn *= sz; - if (phys_addr < 1024) - return phys_addr + (31 << 10); - if (phys_addr < 1024 + fn) - return 31744 - fn + phys_addr - 1024; - if (phys_addr < EEPROMSIZE) - return phys_addr - 1024 - fn; - return -EINVAL; -} - -/* - * The next two routines implement eeprom read/write from physical addresses. - */ -static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) -{ - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); - - if (vaddr >= 0) - vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); - return vaddr < 0 ? vaddr : 0; -} - -static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) -{ - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); - - if (vaddr >= 0) - vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); - return vaddr < 0 ? vaddr : 0; -} - -#define EEPROM_MAGIC 0x38E2F10C - -static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, - u8 *data) -{ - int i, err = 0; - struct adapter *adapter = netdev2adap(dev); - - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - e->magic = EEPROM_MAGIC; - for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) - err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); - - if (!err) - memcpy(data, buf + e->offset, e->len); - kfree(buf); - return err; -} - -static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *data) -{ - u8 *buf; - int err = 0; - u32 aligned_offset, aligned_len, *p; - struct adapter *adapter = netdev2adap(dev); - - if (eeprom->magic != EEPROM_MAGIC) - return -EINVAL; - - aligned_offset = eeprom->offset & ~3; - aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; - - if (adapter->fn > 0) { - u32 start = 1024 + adapter->fn * EEPROMPFSIZE; - - if (aligned_offset < start || - aligned_offset + aligned_len > start + EEPROMPFSIZE) - return -EPERM; - } - - if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { - /* - * RMW possibly needed for first or last words. - */ - buf = kmalloc(aligned_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); - if (!err && aligned_len > 4) - err = eeprom_rd_phys(adapter, - aligned_offset + aligned_len - 4, - (u32 *)&buf[aligned_len - 4]); - if (err) - goto out; - memcpy(buf + (eeprom->offset & 3), data, eeprom->len); - } else - buf = data; - - err = t4_seeprom_wp(adapter, false); - if (err) - goto out; - - for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { - err = eeprom_wr_phys(adapter, aligned_offset, *p); - aligned_offset += 4; - } - - if (!err) - err = t4_seeprom_wp(adapter, true); -out: - if (buf != data) - kfree(buf); - return err; -} - -static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) -{ - int ret; - const struct firmware *fw; - struct adapter *adap = netdev2adap(netdev); - unsigned int mbox = PCIE_FW_MASTER_M + 1; - - ef->data[sizeof(ef->data) - 1] = '\0'; - ret = request_firmware(&fw, ef->data, adap->pdev_dev); - if (ret < 0) - return ret; - - /* If the adapter has been fully initialized then we'll go ahead and - * try to get the firmware's cooperation in upgrading to the new - * firmware image otherwise we'll try to do the entire job from the - * host ... and we always "force" the operation in this path. 
- */ - if (adap->flags & FULL_INIT_DONE) - mbox = adap->mbox; - - ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); - release_firmware(fw); - if (!ret) - dev_info(adap->pdev_dev, "loaded firmware %s," - " reload cxgb4 driver\n", ef->data); - return ret; -} - -#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) -#define BCAST_CRC 0xa0ccc1a6 - -static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - wol->supported = WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = netdev2adap(dev)->wol; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - int err = 0; - struct port_info *pi = netdev_priv(dev); - - if (wol->wolopts & ~WOL_SUPPORTED) - return -EINVAL; - t4_wol_magic_enable(pi->adapter, pi->tx_chan, - (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL); - if (wol->wolopts & WAKE_BCAST) { - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, - ~0ULL, 0, false); - if (!err) - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, - ~6ULL, ~0ULL, BCAST_CRC, true); - } else - t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); - return err; -} - static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { const struct port_info *pi = netdev_priv(dev); @@ -2837,144 +1419,6 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) return err; } -static u32 get_rss_table_size(struct net_device *dev) -{ - const struct port_info *pi = netdev_priv(dev); - - return pi->rss_size; -} - -static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) -{ - const struct port_info *pi = netdev_priv(dev); - unsigned int n = pi->rss_size; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!p) - return 0; - while (n--) - p[n] = pi->rss[n]; - return 0; -} - -static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, - const u8 hfunc) -{ - unsigned int i; - struct port_info *pi = netdev_priv(dev); - - /* We require at least one supported parameter to be changed and no - * change in any of the unsupported parameters - */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) - return -EOPNOTSUPP; - if (!p) - return 0; - - for (i = 0; i < pi->rss_size; i++) - pi->rss[i] = p[i]; - if (pi->adapter->flags & FULL_INIT_DONE) - return write_rss(pi, pi->rss); - return 0; -} - -static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules) -{ - const struct port_info *pi = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_GRXFH: { - unsigned int v = pi->rss_mode; - - info->data = 0; - switch (info->flow_type) { - case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case IPV4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = 
RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - } - return 0; - } - case ETHTOOL_GRXRINGS: - info->data = pi->nqsets; - return 0; - } - return -EOPNOTSUPP; -} - -static const struct ethtool_ops cxgb_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, - .get_drvinfo = get_drvinfo, - .get_msglevel = get_msglevel, - .set_msglevel = set_msglevel, - .get_ringparam = get_sge_param, - .set_ringparam = set_sge_param, - .get_coalesce = get_coalesce, - .set_coalesce = set_coalesce, - .get_eeprom_len = get_eeprom_len, - .get_eeprom = get_eeprom, - .set_eeprom = set_eeprom, - .get_pauseparam = get_pauseparam, - .set_pauseparam = set_pauseparam, - .get_link = ethtool_op_get_link, - .get_strings = get_strings, - .set_phys_id = identify_port, - .nway_reset = restart_autoneg, - .get_sset_count = get_sset_count, - .get_ethtool_stats = get_stats, - .get_regs_len = get_regs_len, - .get_regs = get_regs, - .get_wol = get_wol, - .set_wol = set_wol, - .get_rxnfc = get_rxnfc, - .get_rxfh_indir_size = get_rss_table_size, - .get_rxfh = get_rss_table, - .set_rxfh = set_rss_table, - .flash_device = set_flash, -}; - static int setup_debugfs(struct adapter *adap) { if (IS_ERR_OR_NULL(adap->debugfs_root)) @@ -4244,19 +2688,12 @@ static int cxgb_up(struct adapter *adap) static void cxgb_down(struct adapter *adapter) { - t4_intr_disable(adapter); cancel_work_sync(&adapter->tid_release_task); cancel_work_sync(&adapter->db_full_task); cancel_work_sync(&adapter->db_drop_task); adapter->tid_release_task_busy = false; adapter->tid_release_head = NULL; - if (adapter->flags & USING_MSIX) { - free_msix_queue_irqs(adapter); - free_irq(adapter->msix_info[0].vec, adapter); - } else - free_irq(adapter->pdev->irq, adapter); - quiesce_rx(adapter); t4_sge_stop(adapter); t4_free_sge_resources(adapter); adapter->flags &= ~FULL_INIT_DONE; @@ -4580,6 +3017,10 @@ static const struct net_device_ops cxgb4_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, #endif +#ifdef CONFIG_CHELSIO_T4_FCOE + .ndo_fcoe_enable = cxgb_fcoe_enable, + .ndo_fcoe_disable = cxgb_fcoe_disable, +#endif /* CONFIG_CHELSIO_T4_FCOE */ #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = cxgb_busy_poll, #endif @@ -4733,8 +3174,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) if (ret < 0) return ret; - ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, - 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); + ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, + MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, + FW_CMD_CAP_PF); if (ret < 0) return ret; @@ -5088,10 +3530,15 @@ static int adap_init0(struct adapter *adap) enum dev_state state; u32 params[7], val[7]; struct fw_caps_config_cmd caps_cmd; - struct fw_devlog_cmd devlog_cmd; - u32 devlog_meminfo; int reset = 1; + /* Grab Firmware Device Log parameters as early as possible so we have + * access to it for debugging, etc. 
+ */ + ret = t4_init_devlog_params(adap); + if (ret < 0) + return ret; + /* Contact FW, advertising Master capability */ ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); if (ret < 0) { @@ -5169,30 +3616,6 @@ static int adap_init0(struct adapter *adap) if (ret < 0) goto bye; - /* Read firmware device log parameters. We really need to find a way - * to get these parameters initialized with some default values (which - * are likely to be correct) for the case where we either don't - * attache to the firmware or it's crashed when we probe the adapter. - * That way we'll still be able to perform early firmware startup - * debugging ... If the request to get the Firmware's Device Log - * parameters fails, we'll live so we don't make that a fatal error. - */ - memset(&devlog_cmd, 0, sizeof(devlog_cmd)); - devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F); - devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); - ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), - &devlog_cmd); - if (ret == 0) { - devlog_meminfo = - ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); - adap->params.devlog.memtype = - FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); - adap->params.devlog.start = - FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; - adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog); - } - /* * Find out what ports are available to us. Note that we need to do * this before calling adap_init0_no_config() since it needs nports @@ -5293,6 +3716,51 @@ static int adap_init0(struct adapter *adap) adap->tids.nftids = val[4] - val[3] + 1; adap->sge.ingr_start = val[5]; + /* qids (ingress/egress) returned from firmware can be anywhere + * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. + * Hence driver needs to allocate memory for this range to + * store the queue info. Get the highest IQFLINT/EQ index returned + * in FW_EQ_*_CMD.alloc command. + */ + params[0] = FW_PARAM_PFVF(EQ_END); + params[1] = FW_PARAM_PFVF(IQFLINT_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; + adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; + + adap->sge.egr_map = kcalloc(adap->sge.egr_sz, + sizeof(*adap->sge.egr_map), GFP_KERNEL); + if (!adap->sge.egr_map) { + ret = -ENOMEM; + goto bye; + } + + adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, + sizeof(*adap->sge.ingr_map), GFP_KERNEL); + if (!adap->sge.ingr_map) { + ret = -ENOMEM; + goto bye; + } + + /* Allocate the memory for the various egress queue bitmaps + * ie starving_fl and txq_maperr. + */ + adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.starving_fl) { + ret = -ENOMEM; + goto bye; + } + + adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.txq_maperr) { + ret = -ENOMEM; + goto bye; + } + params[0] = FW_PARAM_PFVF(CLIP_START); params[1] = FW_PARAM_PFVF(CLIP_END); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); @@ -5368,7 +3836,7 @@ static int adap_init0(struct adapter *adap) adap->tids.stid_base = val[1]; adap->tids.nstids = val[2] - val[1] + 1; /* - * Setup server filter region. Divide the availble filter + * Setup server filter region. Divide the available filter * region into two parts. Regular filters get 1/3rd and server * filters get 2/3rd part. This is only enabled if workarond * path is enabled.
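The queue-map hunks above all follow from one firmware contract: the egress/ingress queue IDs handed back by FW_EQ_*_CMD.alloc may land anywhere between the *_START and *_END parameters, so the driver must size its reverse maps and per-queue bitmaps from the queried end index instead of a compile-time maximum. A minimal standalone sketch of that sizing-and-cleanup pattern follows; the demo_sge structure and demo_alloc_queue_maps() are hypothetical stand-ins for the real adapter state, not the driver's API.

/* Sketch: size a qid->queue map and a per-qid bitmap from a
 * firmware-reported ID range [egr_start, fw_eq_end], mirroring
 * the kcalloc()/goto-bye pattern in the hunk above.
 */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_sge {
	unsigned int egr_start;     /* first egress qid */
	unsigned int egr_sz;        /* number of qids to cover */
	void **egr_map;             /* reverse map: qid -> queue */
	unsigned long *starving_fl; /* one bit per egress qid */
};

static int demo_alloc_queue_maps(struct demo_sge *s, u32 fw_eq_end)
{
	s->egr_sz = fw_eq_end - s->egr_start + 1;

	s->egr_map = kcalloc(s->egr_sz, sizeof(*s->egr_map), GFP_KERNEL);
	s->starving_fl = kcalloc(BITS_TO_LONGS(s->egr_sz),
				 sizeof(long), GFP_KERNEL);
	if (!s->egr_map || !s->starving_fl) {
		kfree(s->egr_map);      /* kfree(NULL) is a no-op */
		kfree(s->starving_fl);
		return -ENOMEM;
	}
	return 0;
}

The same ownership rule explains the matching kfree() calls added to the bye: error path and to free_some_resources() further down, and why sge_rx_timer_cb()/sge_tx_timer_cb() switch from ARRAY_SIZE() to BITS_TO_LONGS(s->egr_sz) when walking these bitmaps.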
@@ -5501,6 +3969,10 @@ static int adap_init0(struct adapter *adap) * happened to HW/FW, stop issuing commands. */ bye: + kfree(adap->sge.egr_map); + kfree(adap->sge.ingr_map); + kfree(adap->sge.starving_fl); + kfree(adap->sge.txq_maperr); if (ret != -ETIMEDOUT && ret != -EIO) t4_fw_bye(adap, adap->mbox); return ret; @@ -5528,6 +4000,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, netif_carrier_off(dev); } spin_unlock(&adap->stats_lock); + disable_interrupts(adap); if (adap->flags & FULL_INIT_DONE) cxgb_down(adap); rtnl_unlock(); @@ -5630,7 +4103,7 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int size, unsigned int iqe_size) { q->adap = adap; - set_rspq_intr_params(q, us, cnt); + cxgb4_set_rspq_intr_params(q, us, cnt); q->iqe_len = iqe_size; q->size = size; } @@ -5705,7 +4178,16 @@ static void cfg_queues(struct adapter *adap) s->ofldqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; - s->rdmaciqs = adap->params.nports; + /* Try and allow at least 1 CIQ per cpu rounding down + * to the number of ports, with a minimum of 1 per port. + * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port. + * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port. + * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port. + */ + s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus()); + s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * + adap->params.nports; + s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); } for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { @@ -5791,12 +4273,17 @@ static void reduce_ethqs(struct adapter *adap, int n) static int enable_msix(struct adapter *adap) { int ofld_need = 0; - int i, want, need; + int i, want, need, allocated; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; - struct msix_entry entries[MAX_INGQ + 1]; + struct msix_entry *entries; - for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < MAX_INGQ + 1; ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; @@ -5813,29 +4300,39 @@ static int enable_msix(struct adapter *adap) #else need = adap->params.nports + EXTRA_VECS + ofld_need; #endif - want = pci_enable_msix_range(adap->pdev, entries, need, want); - if (want < 0) - return want; + allocated = pci_enable_msix_range(adap->pdev, entries, need, want); + if (allocated < 0) { + dev_info(adap->pdev_dev, "not enough MSI-X vectors left," + " not using MSI-X\n"); + kfree(entries); + return allocated; + } - /* - * Distribute available vectors to the various queue groups. + /* Distribute available vectors to the various queue groups. * Every group gets its minimum requirement and NIC gets top * priority for leftovers. 
*/ - i = want - EXTRA_VECS - ofld_need; + i = allocated - EXTRA_VECS - ofld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } if (is_offload(adap)) { - i = want - EXTRA_VECS - s->max_ethqsets; - i -= ofld_need - nchan; + if (allocated < want) { + s->rdmaqs = nchan; + s->rdmaciqs = nchan; + } + + /* leftovers go to OFLD */ + i = allocated - EXTRA_VECS - s->max_ethqsets - + s->rdmaqs - s->rdmaciqs; s->ofldqsets = (i / nchan) * nchan; /* round down */ } - for (i = 0; i < want; ++i) + for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; + kfree(entries); return 0; } @@ -5912,6 +4409,10 @@ static void free_some_resources(struct adapter *adapter) t4_free_mem(adapter->l2t); t4_free_mem(adapter->tids.tid_tab); + kfree(adapter->sge.egr_map); + kfree(adapter->sge.ingr_map); + kfree(adapter->sge.starving_fl); + kfree(adapter->sge.txq_maperr); disable_msi(adapter); for_each_port(adapter, i) @@ -6097,7 +4598,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->dcbnl_ops = &cxgb4_dcb_ops; cxgb4_dcb_state_init(netdev); #endif - netdev->ethtool_ops = &cxgb_ethtool_ops; + cxgb4_set_ethtool_ops(netdev); } pci_set_drvdata(pdev, adapter); @@ -6237,6 +4738,8 @@ static void remove_one(struct pci_dev *pdev) if (is_offload(adapter)) detach_ulds(adapter); + disable_interrupts(adapter); + for_each_port(adapter, i) if (adapter->port[i]->reg_state == NETREG_REGISTERED) unregister_netdev(adapter->port[i]); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b4b9f6048fe7..0d2eddab04ef 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -46,6 +46,9 @@ #ifdef CONFIG_NET_RX_BUSY_POLL #include <net/busy_poll.h> #endif /* CONFIG_NET_RX_BUSY_POLL */ +#ifdef CONFIG_CHELSIO_T4_FCOE +#include <scsi/fc/fc_fcoe.h> +#endif /* CONFIG_CHELSIO_T4_FCOE */ #include "cxgb4.h" #include "t4_regs.h" #include "t4_values.h" @@ -118,12 +121,6 @@ #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) /* - * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will - * attempt to refill it. - */ -#define FL_STARVE_THRES 4 - -/* * Suspend an Ethernet Tx queue with fewer available descriptors than this. * This is the same as calc_tx_descs() for a TSO packet with * nr_frags == MAX_SKB_FRAGS. @@ -141,7 +138,7 @@ * Max Tx descriptor space we allow for an Ethernet packet to be inlined * into a WR. */ -#define MAX_IMM_TX_PKT_LEN 128 +#define MAX_IMM_TX_PKT_LEN 256 /* * Max size of a WR sent through a control Tx queue. @@ -245,9 +242,21 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) return fl->size - 8; /* 1 descriptor = 8 buffers */ } -static inline bool fl_starving(const struct sge_fl *fl) +/** + * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has fallen below our "starvation" + * threshold.
+ */ +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) { - return fl->avail - fl->pend_cred <= FL_STARVE_THRES; + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; } static int map_skb(struct device *dev, const struct sk_buff *skb, @@ -583,8 +592,10 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, unsigned int cred = q->avail; __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + int node; gfp |= __GFP_NOWARN; + node = dev_to_node(adap->pdev_dev); if (s->fl_pg_order == 0) goto alloc_small_pages; @@ -593,7 +604,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, * Prefer large buffers */ while (n) { - pg = __dev_alloc_pages(gfp, s->fl_pg_order); + pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); if (unlikely(!pg)) { q->large_alloc_failed++; break; /* fall back to single pages */ @@ -623,7 +634,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, alloc_small_pages: while (n--) { - pg = __dev_alloc_page(gfp); + pg = alloc_pages_node(node, gfp, 0); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -652,7 +663,7 @@ out: cred = q->avail - cred; q->pend_cred += cred; ring_fl_db(adap, q); - if (unlikely(fl_starving(q))) { + if (unlikely(fl_starving(adap, q))) { smp_wmb(); set_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.starving_fl); @@ -719,6 +730,22 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, */ static inline unsigned int sgl_len(unsigned int n) { + /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ n--; return (3 * n) / 2 + (n & 1) + 2; } @@ -766,12 +793,30 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) unsigned int flits; int hdrlen = is_eth_imm(skb); + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (hdrlen) return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. 
+ */ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; if (skb_shinfo(skb)->gso_size) - flits += 2; + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); return flits; } @@ -1044,6 +1089,38 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n) q->pidx -= q->size; } +#ifdef CONFIG_CHELSIO_T4_FCOE +static inline int +cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, + const struct port_info *pi, u64 *cntrl) +{ + const struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (!(fcoe->flags & CXGB_FCOE_ENABLED)) + return 0; + + if (skb->protocol != htons(ETH_P_FCOE)) + return 0; + + skb_reset_mac_header(skb); + skb->mac_len = sizeof(struct ethhdr); + + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); + + if (!cxgb_fcoe_sof_eof_supported(adap, skb)) + return -ENOTSUPP; + + /* FC CRC offload */ + *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) | + TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS | + TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) | + TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) | + TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END); + return 0; +} +#endif /* CONFIG_CHELSIO_T4_FCOE */ + /** * t4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1066,6 +1143,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) const struct skb_shared_info *ssi; dma_addr_t addr[MAX_SKB_FRAGS + 1]; bool immediate = false; +#ifdef CONFIG_CHELSIO_T4_FCOE + int err; +#endif /* CONFIG_CHELSIO_T4_FCOE */ /* * The chip min packet length is 10 octets but play safe and reject @@ -1082,6 +1162,13 @@ out_free: dev_kfree_skb_any(skb); q = &adap->sge.ethtxq[qidx + pi->first_qset]; reclaim_completed_tx(adap, &q->q, true); + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + +#ifdef CONFIG_CHELSIO_T4_FCOE + err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); + if (unlikely(err == -ENOTSUPP)) + goto out_free; +#endif /* CONFIG_CHELSIO_T4_FCOE */ flits = calc_tx_flits(skb); ndesc = flits_to_desc(flits); @@ -1153,13 +1240,17 @@ out_free: dev_kfree_skb_any(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; q->tx_cso++; - } else - cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } } if (skb_vlan_tag_present(skb)) { q->vlan_ins++; cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); +#ifdef CONFIG_CHELSIO_T4_FCOE + if (skb->protocol == htons(ETH_P_FCOE)) + cntrl |= TXPKT_VLAN( + ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); +#endif /* CONFIG_CHELSIO_T4_FCOE */ } cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | @@ -1759,6 +1850,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
CPL_TRACE_PKT : CPL_TRACE_PKT_T5; +#ifdef CONFIG_CHELSIO_T4_FCOE + struct port_info *pi; +#endif if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); @@ -1799,8 +1893,24 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, skb->ip_summed = CHECKSUM_COMPLETE; rxq->stats.rx_cso++; } - } else + } else { skb_checksum_none_assert(skb); +#ifdef CONFIG_CHELSIO_T4_FCOE +#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ + RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) + + pi = netdev_priv(skb->dev); + if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { + if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && + (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { + if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } + +#undef CPL_RX_PKT_FLAGS +#endif /* CONFIG_CHELSIO_T4_FCOE */ + } if (unlikely(pkt->vlan_ex)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); @@ -1900,7 +2010,7 @@ static int process_responses(struct sge_rspq *q, int budget) if (!is_new_response(rc, q)) break; - rmb(); + dma_rmb(); rsp_type = RSPD_TYPE(rc->type_gen); if (likely(rsp_type == RSP_TYPE_FLBUF)) { struct page_frag *fp; @@ -2092,7 +2202,7 @@ static unsigned int process_intrq(struct adapter *adap) if (!is_new_response(rc, q)) break; - rmb(); + dma_rmb(); if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { unsigned int qid = ntohl(rc->pldbuflen_qid); @@ -2128,7 +2238,8 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie) { struct adapter *adap = cookie; - t4_slow_intr_handler(adap); + if (adap->flags & MASTER_PF) + t4_slow_intr_handler(adap); process_intrq(adap); return IRQ_HANDLED; } @@ -2143,7 +2254,8 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie) struct adapter *adap = cookie; t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); - if (t4_slow_intr_handler(adap) | process_intrq(adap)) + if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | + process_intrq(adap)) return IRQ_HANDLED; return IRQ_NONE; /* probably shared interrupt */ } @@ -2171,7 +2283,7 @@ static void sge_rx_timer_cb(unsigned long data) struct adapter *adap = (struct adapter *)data; struct sge *s = &adap->sge; - for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) + for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) for (m = s->starving_fl[i]; m; m &= m - 1) { struct sge_eth_rxq *rxq; unsigned int id = __ffs(m) + i * BITS_PER_LONG; @@ -2180,7 +2292,7 @@ static void sge_rx_timer_cb(unsigned long data) clear_bit(id, s->starving_fl); smp_mb__after_atomic(); - if (fl_starving(fl)) { + if (fl_starving(adap, fl)) { rxq = container_of(fl, struct sge_eth_rxq, fl); if (napi_reschedule(&rxq->rspq.napi)) fl->starving++; @@ -2259,7 +2371,7 @@ static void sge_tx_timer_cb(unsigned long data) struct adapter *adap = (struct adapter *)data; struct sge *s = &adap->sge; - for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) + for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) for (m = s->txq_maperr[i]; m; m &= m - 1) { unsigned long id = __ffs(m) + i * BITS_PER_LONG; struct sge_ofld_txq *txq = s->egr_map[id]; @@ -2741,7 +2853,8 @@ void t4_free_sge_resources(struct adapter *adap) free_rspq_fl(adap, &adap->sge.intrq, NULL); /* clear the reverse egress queue map */ - memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); + memset(adap->sge.egr_map, 0, + adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); } void t4_sge_start(struct adapter *adap) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4d643b65265e..5959e3ae72da 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC * @addr: address within indicated memory type * @len: amount of memory to transfer - * @buf: host memory buffer + * @hbuf: host memory buffer * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) * * Reads/writes an [almost] arbitrary memory region in the firmware: the @@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * caller's responsibility to perform appropriate byte order conversions. */ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, - u32 len, __be32 *buf, int dir) + u32 len, void *hbuf, int dir) { u32 pos, offset, resid, memoffset; u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; /* Argument sanity checks ... */ - if (addr & 0x3) + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) return -EINVAL; + buf = (u32 *)hbuf; /* It's convenient to be able to handle lengths which aren't a * multiple of 32-bits because we often end up transferring files to @@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, /* Transfer data to/from the adapter as long as there's an integral * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzle." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzles.
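+ *
+ * A worked example of the swizzle (illustrative, not from the patch):
+ * if adapter memory holds the bytes { 0xde, 0xad, 0xbe, 0xef } at
+ * i .. i+3, readl() yields the 32-bit value 0xefbeadde; treating that
+ * as __le32 and applying le32_to_cpu() stores the bytes back as
+ * ( 0xde, 0xad, 0xbe, 0xef ) in host memory on both Little- and
+ * Big-Endian hosts, which is what the loop below relies on.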
+ */ while (len > 0) { if (dir == T4_MEMORY_READ) - *buf++ = (__force __be32) t4_read_reg(adap, - mem_base + offset); + *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, + mem_base + offset)); else t4_write_reg(adap, mem_base + offset, - (__force u32) *buf++); + (__force u32)cpu_to_le32(*buf++)); offset += sizeof(__be32); len -= sizeof(__be32); @@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, */ if (resid) { union { - __be32 word; + u32 word; char byte[4]; } last; unsigned char *bp; int i; if (dir == T4_MEMORY_READ) { - last.word = (__force __be32) t4_read_reg(adap, - mem_base + offset); + last.word = le32_to_cpu( + (__force __le32)t4_read_reg(adap, + mem_base + offset)); for (bp = (unsigned char *)buf, i = resid; i < 4; i++) bp[i] = last.byte[i]; } else { @@ -584,13 +618,741 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, for (i = resid; i < 4; i++) last.byte[i] = 0; t4_write_reg(adap, mem_base + offset, - (__force u32) last.word); + (__force u32)cpu_to_le32(last.word)); } } return 0; } +/** + * t4_get_regs_len - return the size of the chip's register set + * @adapter: the adapter + * + * Returns the size of the chip's BAR0 register space. + */ +unsigned int t4_get_regs_len(struct adapter *adapter) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + switch (chip_version) { + case CHELSIO_T4: + return T4_REGMAP_SIZE; + + case CHELSIO_T5: + return T5_REGMAP_SIZE; + } + + dev_err(adapter->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return 0; +} + +/** + * t4_get_regs - read chip registers into provided buffer + * @adap: the adapter + * @buf: register buffer + * @buf_size: size (in bytes) of register buffer + * + * If the provided register buffer isn't large enough for the chip's + * full register range, the register dump will be truncated to the + * register buffer's size.
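+ *
+ * A typical caller sizes the buffer with t4_get_regs_len() first; an
+ * illustrative sketch (not from the patch):
+ *
+ *	buf = kzalloc(t4_get_regs_len(adap), GFP_KERNEL);
+ *	if (buf)
+ *		t4_get_regs(adap, buf, t4_get_regs_len(adap));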
+ */ +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) +{ + static const unsigned int t4_reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11110, + 0x11118, 0x11190, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f68c, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 
0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x1148, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a9c, + 0x5b9c, 0x5bfc, + 0x6000, 0x6040, + 0x6058, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x90f8, + 0x9400, 0x9470, + 0x9600, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x11088, + 0x1109c, 0x11110, + 0x11118, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c60, + 0x19c94, 0x19e10, + 0x19e50, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30100, 0x30144, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 
0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x31600, + 0x31608, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e04, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32208, 0x3223c, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b70, + 0x33000, 0x33048, + 0x33060, 0x3309c, + 0x330f0, 0x33148, + 0x33160, 0x3319c, + 0x331f0, 0x332e4, + 0x332f8, 0x333e4, + 0x333f8, 0x33448, + 0x33460, 0x3349c, + 0x334f0, 0x33548, + 0x33560, 0x3359c, + 0x335f0, 0x336e4, + 0x336f8, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33948, + 0x33960, 0x3399c, + 0x339f0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34100, 0x34144, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d50, + 0x35200, 0x3520c, + 0x35220, 0x35220, + 0x35240, 0x35240, + 0x35600, 0x35600, + 0x35608, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e04, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36208, 0x3623c, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b70, + 0x37000, 0x37048, + 0x37060, 0x3709c, + 0x370f0, 0x37148, + 0x37160, 0x3719c, + 0x371f0, 0x372e4, + 0x372f8, 0x373e4, + 0x373f8, 0x37448, + 0x37460, 0x3749c, + 0x374f0, 0x37548, + 0x37560, 0x3759c, + 0x375f0, 0x376e4, + 0x376f8, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37948, + 0x37960, 0x3799c, + 0x379f0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38100, 0x38144, + 0x38190, 0x381d0, + 0x38200, 0x38318, + 0x38400, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a04, + 0x38a0c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38c24, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x39600, + 0x39608, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e04, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a208, 0x3a23c, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab70, + 0x3b000, 0x3b048, + 0x3b060, 0x3b09c, + 0x3b0f0, 0x3b148, + 0x3b160, 0x3b19c, + 0x3b1f0, 0x3b2e4, + 0x3b2f8, 0x3b3e4, + 0x3b3f8, 0x3b448, + 0x3b460, 0x3b49c, + 0x3b4f0, 0x3b548, + 0x3b560, 0x3b59c, + 0x3b5f0, 0x3b6e4, + 0x3b6f8, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 0x3b88c, + 0x3b8e8, 0x3b8ec, + 0x3b900, 0x3b948, + 0x3b960, 0x3b99c, + 0x3b9f0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca04, + 0x3ca0c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3cc24, + 
0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d600, + 0x3d608, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de04, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e208, 0x3e23c, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb70, + 0x3f000, 0x3f048, + 0x3f060, 0x3f09c, + 0x3f0f0, 0x3f148, + 0x3f160, 0x3f19c, + 0x3f1f0, 0x3f2e4, + 0x3f2f8, 0x3f3e4, + 0x3f3f8, 0x3f448, + 0x3f460, 0x3f49c, + 0x3f4f0, 0x3f548, + 0x3f560, 0x3f59c, + 0x3f5f0, 0x3f6e4, + 0x3f6f8, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f948, + 0x3f960, 0x3f99c, + 0x3f9f0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40068, + 0x40080, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40298, + 0x402ac, 0x4033c, + 0x403f8, 0x403fc, + 0x41304, 0x413c4, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44078, + 0x440c0, 0x44278, + 0x442c0, 0x44478, + 0x444c0, 0x44678, + 0x446c0, 0x44878, + 0x448c0, 0x449fc, + 0x45000, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48068, + 0x48080, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48298, + 0x482ac, 0x4833c, + 0x483f8, 0x483fc, + 0x49304, 0x493c4, + 0x49400, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c078, + 0x4c0c0, 0x4c278, + 0x4c2c0, 0x4c478, + 0x4c4c0, 0x4c678, + 0x4c6c0, 0x4c878, + 0x4c8c0, 0x4c9fc, + 0x4d000, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f08c, + 0x4f200, 0x4f250, + 0x4f400, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x5101c, + 0x51300, 0x51308, + }; + + u32 *buf_end = (u32 *)((char *)buf + buf_size); + const unsigned int *reg_ranges; + int reg_ranges_size, range; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); + + /* Select the right set of register ranges to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T4: + reg_ranges = t4_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); + break; + + case CHELSIO_T5: + reg_ranges = t5_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); + break; + + default: + dev_err(adap->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return; + } + + /* Clear the register buffer and insert the appropriate register + * values selected by the above register ranges. + */ + memset(buf, 0, buf_size); + for (range = 0; range < reg_ranges_size; range += 2) { + unsigned int reg = reg_ranges[range]; + unsigned int last_reg = reg_ranges[range + 1]; + u32 *bufp = (u32 *)((char *)buf + reg); + + /* Iterate across the register range filling in the register + * buffer but don't write past the end of the register buffer. 
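+ * Since bufp is computed from the register address itself, each value
+ * lands at its register's own offset in the dump; ranges not listed
+ * above simply stay zero from the memset() above.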
+ */ + while (reg <= last_reg && bufp < buf_end) { + *bufp++ = t4_read_reg(adap, reg); + reg += sizeof(u32); + } + } +} + #define EEPROM_STAT_ADDR 0x7bfc #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 @@ -833,7 +1595,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's - * natural endianess. + * natural endianness. */ int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) @@ -1086,7 +1848,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, } /* Installed successfully, update the cached header too. */ - memcpy(card_fw, fs_fw, sizeof(*card_fw)); + *card_fw = *fs_fw; card_fw_usable = 1; *reset = 0; /* already reset as part of load_fw */ } @@ -3524,7 +4286,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * For the single-MTU buffers in unpacked mode we need to include * space for the SGE Control Packet Shift, 14 byte Ethernet header, * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet - * Padding boundry. All of these are accommodated in the Factory + * Padding boundary. All of these are accommodated in the Factory * Default Firmware Configuration File but we need to adjust it for * this host's cache line size. */ @@ -4425,6 +5187,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, } /** + * t4_init_devlog_params - initialize adapter->params.devlog + * @adap: the adapter + * + * Initialize various fields of the adapter's Firmware Device Log + * Parameters structure. + */ +int t4_init_devlog_params(struct adapter *adap) +{ + struct devlog_params *dparams = &adap->params.devlog; + u32 pf_dparams; + unsigned int devlog_meminfo; + struct fw_devlog_cmd devlog_cmd; + int ret; + + /* If we're dealing with newer firmware, the Device Log Parameters + * are stored in a designated register which allows us to access the + * Device Log even if we can't talk to the firmware. + */ + pf_dparams = + t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)); + if (pf_dparams) { + unsigned int nentries, nentries128; + + dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams); + dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; + + nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams); + nentries = (nentries128 + 1) * 128; + dparams->size = nentries * sizeof(struct fw_devlog_e); + + return 0; + } + + /* Otherwise, ask the firmware for its Device Log Parameters. + */ + memset(&devlog_cmd, 0, sizeof(devlog_cmd)); + devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), + &devlog_cmd); + if (ret) + return ret; + + devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); + dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); + dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; + dparams->size = ntohl(devlog_cmd.memsize_devlog); + + return 0; +} + +/** * t4_init_sge_params - initialize adap->params.sge * @adapter: the adapter * @@ -4495,7 +5310,7 @@ int t4_init_tp_params(struct adapter *adap) PROTOCOL_F); /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID - * represents the presense of an Outer VLAN instead of a VNIC ID.
+ * represents the presence of an Outer VLAN instead of a VNIC ID. */ if ((adap->params.tp.ingress_config & VNIC_F) == 0) adap->params.tp.vnic_shift = -1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 0fb975e258b3..30a2f56e99c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -794,6 +794,14 @@ struct cpl_rx_pkt { __be16 err_vec; }; +#define RXF_PSH_S 20 +#define RXF_PSH_V(x) ((x) << RXF_PSH_S) +#define RXF_PSH_F RXF_PSH_V(1U) + +#define RXF_SYN_S 21 +#define RXF_SYN_V(x) ((x) << RXF_SYN_S) +#define RXF_SYN_F RXF_SYN_V(1U) + #define RXF_UDP_S 22 #define RXF_UDP_V(x) ((x) << RXF_UDP_S) #define RXF_UDP_F RXF_UDP_V(1U) @@ -810,6 +818,18 @@ struct cpl_rx_pkt { #define RXF_IP6_V(x) ((x) << RXF_IP6_S) #define RXF_IP6_F RXF_IP6_V(1U) +#define RXF_SYN_COOKIE_S 26 +#define RXF_SYN_COOKIE_V(x) ((x) << RXF_SYN_COOKIE_S) +#define RXF_SYN_COOKIE_F RXF_SYN_COOKIE_V(1U) + +#define RXF_FCOE_S 26 +#define RXF_FCOE_V(x) ((x) << RXF_FCOE_S) +#define RXF_FCOE_F RXF_FCOE_V(1U) + +#define RXF_LRO_S 27 +#define RXF_LRO_V(x) ((x) << RXF_LRO_S) +#define RXF_LRO_F RXF_LRO_V(1U) + /* rx_pkt.l2info fields */ #define RX_ETHHDR_LEN_S 0 #define RX_ETHHDR_LEN_M 0x1F @@ -846,6 +866,11 @@ struct cpl_rx_pkt { #define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S) #define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M) +/* rx_pkt.err_vec fields */ +#define RXERR_CSUM_S 13 +#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S) +#define RXERR_CSUM_F RXERR_CSUM_V(1U) + struct cpl_trace_pkt { u8 opcode; u8 intf; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index ddfb5b846045..1a9a6f334d2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -60,8 +60,6 @@ * -- Used to finish the definition of the PCI ID Table. Note that we * -- will be adding a trailing semi-colon (";") here. */ -#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN - #ifndef CH_PCI_DEVICE_ID_FUNCTION #error CH_PCI_DEVICE_ID_FUNCTION not defined! 
#endif @@ -154,8 +152,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; -#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ - #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 231a725f6d5d..326674b19983 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -63,6 +63,8 @@ #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + #define SGE_PF_KDOORBELL_A 0x0 #define QID_S 15 @@ -707,6 +709,7 @@ #define PFNUM_V(x) ((x) << PFNUM_S) #define PCIE_FW_A 0x30b8 +#define PCIE_FW_PF_A 0x30bc #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 9b353a88cbda..03fbfd1fb3df 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -36,7 +36,7 @@ #define _T4FW_INTERFACE_H_ enum fw_retval { - FW_SUCCESS = 0, /* completed sucessfully */ + FW_SUCCESS = 0, /* completed successfully */ FW_EPERM = 1, /* operation not permitted */ FW_ENOENT = 2, /* no such file or directory */ FW_EIO = 5, /* input/output error; hw bad */ @@ -101,7 +101,7 @@ enum fw_wr_opcodes { FW_RI_BIND_MW_WR = 0x18, FW_RI_FR_NSMR_WR = 0x19, FW_RI_INV_LSTAG_WR = 0x1a, - FW_LASTC2E_WR = 0x40 + FW_LASTC2E_WR = 0x70 }; struct fw_wr_hdr { @@ -993,6 +993,7 @@ enum fw_memtype_cf { FW_MEMTYPE_CF_EXTMEM = 0x2, FW_MEMTYPE_CF_FLASH = 0x4, FW_MEMTYPE_CF_INTERNAL = 0x5, + FW_MEMTYPE_CF_EXTMEM1 = 0x6, }; struct fw_caps_config_cmd { @@ -1035,6 +1036,7 @@ enum fw_params_mnem { FW_PARAMS_MNEM_PFVF = 2, /* function params */ FW_PARAMS_MNEM_REG = 3, /* limited register access */ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ FW_PARAMS_MNEM_LAST }; @@ -3102,7 +3104,8 @@ enum fw_devlog_facility { FW_DEVLOG_FACILITY_FCOE = 0x2E, FW_DEVLOG_FACILITY_FOISCSI = 0x30, FW_DEVLOG_FACILITY_FOFCOE = 0x32, - FW_DEVLOG_FACILITY_MAX = 0x32, + FW_DEVLOG_FACILITY_CHNET = 0x34, + FW_DEVLOG_FACILITY_MAX = 0x34, }; /* log message format */ @@ -3139,4 +3142,36 @@ struct fw_devlog_cmd { (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) +/* P C I E F W P F 7 R E G I S T E R */ + +/* PF7 stores the Firmware Device Log parameters which allow Host Drivers to + * access the "devlog" without needing to contact firmware. The encoding is + * mostly the same as that returned by the DEVLOG command except for the size, + * which is encoded here as the number of 128-entry multiples minus 1 rather + * than as the memory size as is done in the DEVLOG command. Thus, 0 means 128 + * and 15 means 2048. This of course in turn constrains the allowed values + * for the devlog size ...
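+ *
+ * A worked decode (illustrative): NENTRIES128 == 3 means (3 + 1) * 128
+ * == 512 entries, i.e. 512 * sizeof(struct fw_devlog_e) bytes of
+ * devlog, and ADDR16 must be scaled back up by 16 (shifted left by 4)
+ * to recover the byte address, as t4_init_devlog_params() does.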
+ */ +#define PCIE_FW_PF_DEVLOG 7 + +#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28 +#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf +#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \ + ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S) +#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \ + PCIE_FW_PF_DEVLOG_NENTRIES128_M) + +#define PCIE_FW_PF_DEVLOG_ADDR16_S 4 +#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff +#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S) +#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M) + +#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0 +#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf +#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S) +#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) + #endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index e2bd3f747858..b9d1cbac0eee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,13 +36,13 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0C -#define T4FW_VERSION_MICRO 0x19 +#define T4FW_VERSION_MINOR 0x0D +#define T4FW_VERSION_MICRO 0x20 #define T4FW_VERSION_BUILD 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0C -#define T5FW_VERSION_MICRO 0x19 +#define T5FW_VERSION_MINOR 0x0D +#define T5FW_VERSION_MICRO 0x20 #define T5FW_VERSION_BUILD 0x00 #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 122e2964e63b..1d893b0b7ddf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -3034,7 +3034,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) /* Macros needed to support the PCI Device ID Table ... */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ - static struct pci_device_id cxgb4vf_pci_tbl[] = { + static const struct pci_device_id cxgb4vf_pci_tbl[] = { #define CH_PCI_DEVICE_ID_FUNCTION 0x8 #define CH_PCI_ID_TABLE_ENTRY(devid) \ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0545f0de1c52..482f6de6817d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -875,7 +875,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * Write Header (incorporated as part of the cpl_tx_pkt_lso and * cpl_tx_pkt structures), followed by either a TX Packet Write CPL * message or, if we're doing a Large Send Offload, an LSO CPL message - * with an embeded TX Packet Write CPL message. + * with an embedded TX Packet Write CPL message. */ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); if (skb_shinfo(skb)->gso_size) @@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ? (tq->pidx - 1) : (tq->size - 1)); __be64 *src = (__be64 *)&tq->desc[index]; - __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + + __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + SGE_UDB_WCDOORBELL); unsigned int count = EQ_UNIT / sizeof(__be64); @@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, * DMA. 
*/ while (count) { - writeq(*src, dst); + /* the (__force u64) is because the compiler + * doesn't understand the endian swizzling + * going on + */ + writeq((__force u64)*src, dst); src++; dst++; count--; @@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); wr = (void *)&txq->q.desc[txq->q.pidx]; wr->equiq_to_len16 = cpu_to_be32(wr_mid); - wr->r3[0] = cpu_to_be64(0); - wr->r3[1] = cpu_to_be64(0); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); end = (u64 *)wr + flits; @@ -1747,7 +1751,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) * Figure out what kind of response we've received from the * SGE. */ - rmb(); + dma_rmb(); rsp_type = RSPD_TYPE(rc->type_gen); if (likely(rsp_type == RSP_TYPE_FLBUF)) { struct page_frag *fp; @@ -1931,7 +1935,7 @@ static unsigned int process_intrq(struct adapter *adapter) * error and go on to the next response message. This should * never happen ... */ - rmb(); + dma_rmb(); if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { dev_err(adapter->pdev_dev, "Unexpected INTRQ response type %d\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1b5506df35b1..966ee900ed00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, if (rpl) { /* request bit in high-order BE word */ - WARN_ON((be32_to_cpu(*(const u32 *)cmd) + WARN_ON((be32_to_cpu(*(const __be32 *)cmd) & FW_CMD_REQUEST_F) == 0); get_mbox_rpl(adapter, rpl, size, mbox_data); - WARN_ON((be32_to_cpu(*(u32 *)rpl) + WARN_ON((be32_to_cpu(*(__be32 *)rpl) & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, @@ -339,7 +339,7 @@ int t4vf_port_init(struct adapter *adapter, int pidx) * @adapter: the adapter * * Issues a reset command to FW. For a Physical Function this would - * result in the Firmware reseting all of its state. For a Virtual + * result in the Firmware resetting all of its state. For a Virtual * Function this just resets the state associated with the VF. */ int t4vf_fw_reset(struct adapter *adapter) @@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter, * o The BAR2 Queue ID. * o The BAR2 Queue ID Offset into the BAR2 page. */ - bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); bar2_qid = qid & qpp_mask; bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index d1c025fd9726..60383040d6c6 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1578,7 +1578,7 @@ out1: #ifndef CONFIG_CS89x0_PLATFORM /* - * This function converts the I/O port addres used by the cs89x0_probe() and + * This function converts the I/O port address used by the cs89x0_probe() and * init_module() functions to the I/O memory address used by the * cs89x0_probe1() function. 
*/ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9cbe038a388e..204bd182473b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) } if (ENIC_TEST_INTR(pba, notify_intr)) { - vnic_intr_return_all_credits(&enic->intr[notify_intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[notify_intr]); } if (ENIC_TEST_INTR(pba, err_intr)) { @@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) struct enic *enic = data; unsigned int intr = enic_msix_notify_intr(enic); - vnic_intr_return_all_credits(&enic->intr[intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[intr]); return IRQ_HANDLED; } @@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, } else { memset(pp, 0, sizeof(*pp)); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } else { /* Set flag to indicate that the port assoc/disassoc @@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ if (pp->request == PORT_REQUEST_DISASSOCIATE) { - memset(pp->mac_addr, 0, ETH_ALEN); + eth_zero_addr(pp->mac_addr); if (vf == PORT_SELF_VF) - memset(netdev->dev_addr, 0, ETH_ALEN); + eth_zero_addr(netdev->dev_addr); } } if (vf == PORT_SELF_VF) - memset(pp->vf_mac, 0, ETH_ALEN); + eth_zero_addr(pp->vf_mac); return err; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 50a00777228e..afd8e78e024e 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -653,7 +653,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev) if ( !(db->media_mode & DMFE_AUTO) ) db->op_mode = db->media_mode; /* Force Mode */ - /* Initialize Transmit/Receive decriptor and CR3/4 */ + /* Initialize Transmit/Receive descriptor and CR3/4 */ dmfe_descriptor_init(dev); /* Init CR6 to program DM910x operation */ diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) (unsigned int)tp->rx_ring[i].buffer1, (unsigned int)tp->rx_ring[i].buffer2, buf[0], buf[1], buf[2]); - for (j = 0; buf[j] != 0xee && j < 1600; j++) + for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) if (j < 100) pr_cont(" %02x", buf[j]); pr_cont(" j=%d\n", j); diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 1c5916b13778..2c30c0c83f98 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -564,7 +564,7 @@ static void uli526x_init(struct net_device *dev) if ( !(db->media_mode & ULI526X_AUTO) ) db->op_mode = db->media_mode; /* Force Mode */ - /* Initialize Transmit/Receive decriptor and CR3/4 */ + /* Initialize Transmit/Receive descriptor and CR3/4 */ uli526x_descriptor_init(dev, ioaddr); /* Init CR6 to program M526X operation */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..1bf1cdce74ac 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -30,11 
+30,12 @@ #include <linux/firmware.h> #include <linux/slab.h> #include <linux/u64_stats_sync.h> +#include <linux/cpumask.h> #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.4u" +#define DRV_VER "10.6.0.1" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -87,6 +88,7 @@ #define BE3_MAX_EVT_QS 16 #define BE3_SRIOV_MAX_EVT_QS 8 +#define MAX_RSS_IFACES 15 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 #define MAX_TX_QS 32 @@ -97,6 +99,7 @@ #define BE_NAPI_WEIGHT 64 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) +#define MAX_NUM_POST_ERX_DB 255u #define MAX_VFS 30 /* Max VFs supported by BE3 FW */ #define FW_VER_LEN 32 @@ -182,6 +185,7 @@ struct be_eq_obj { u16 spurious_intr; struct napi_struct napi; struct be_adapter *adapter; + cpumask_var_t affinity_mask; #ifdef CONFIG_NET_RX_BUSY_POLL #define BE_EQ_IDLE 0 @@ -238,10 +242,17 @@ struct be_tx_stats { struct u64_stats_sync sync_compl; }; +/* Structure to hold some data of interest obtained from a TX CQE */ +struct be_tx_compl_info { + u8 status; /* Completion status */ + u16 end_index; /* Completed TXQ Index */ +}; + struct be_tx_obj { u32 db_offset; struct be_queue_info q; struct be_queue_info cq; + struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; @@ -354,6 +365,7 @@ struct be_vf_cfg { u16 vlan_tag; u32 tx_rate; u32 plink_tracking; + u32 privileges; }; enum vf_state { @@ -369,6 +381,7 @@ enum vf_state { #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) #define BE_FLAGS_SETUP_DONE BIT(9) #define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 @@ -403,8 +416,11 @@ struct be_resources { u16 max_tx_qs; u16 max_rss_qs; u16 max_rx_qs; + u16 max_cq_count; u16 max_uc_mac; /* Max UC MACs programmable */ u16 max_vlans; /* Number of vlans supported */ + u16 max_iface_count; + u16 max_mcc_count; u16 max_evt_qs; u32 if_cap_flags; u32 vf_if_cap_flags; /* VF if capability flags */ @@ -417,12 +433,46 @@ struct rss_info { u8 rss_hkey[RSS_HASH_KEY_LEN]; }; +/* Macros to read/write the 'features' word of be_wrb_params structure. + */ +#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT +#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) + +#define BE_WRB_F_GET(word, name) \ + (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) + +#define BE_WRB_F_SET(word, name, val) \ + ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) + +/* Feature/offload bits */ +enum { + BE_WRB_F_CRC_BIT, /* Ethernet CRC */ + BE_WRB_F_IPCS_BIT, /* IP csum */ + BE_WRB_F_TCPCS_BIT, /* TCP csum */ + BE_WRB_F_UDPCS_BIT, /* UDP csum */ + BE_WRB_F_LSO_BIT, /* LSO */ + BE_WRB_F_LSO6_BIT, /* LSO6 */ + BE_WRB_F_VLAN_BIT, /* VLAN */ + BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ +}; + +/* The structure below provides a HW-agnostic abstraction of WRB params + * retrieved from a TX skb. This is in turn passed to chip specific routines + * during transmit, to set the corresponding params in the WRB. + */ +struct be_wrb_params { + u32 features; /* Feature bits */ + u16 vlan_tag; /* VLAN tag */ + u16 lso_mss; /* MSS for LSO */ +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* On SH,BEx only. 
Shadow of PCI config space */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; @@ -447,6 +497,8 @@ struct be_adapter { /* Rx rings */ u16 num_rx_qs; + u16 num_rss_qs; + u16 need_def_rxq; struct be_rx_obj rx_obj[MAX_RX_QS]; u32 big_page_size; /* Compounded page size shared by rx wrbs */ @@ -461,7 +513,7 @@ struct be_adapter { struct delayed_work work; u16 work_counter; - struct delayed_work func_recovery_work; + struct delayed_work be_err_detection_work; u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ @@ -594,9 +646,8 @@ extern const struct ethtool_ops be_ethtool_ops; for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \ i++, rxo++) -/* Skip the default non-rss queue (last one)*/ #define for_all_rss_queues(adapter, rxo, i) \ - for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\ + for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \ i++, rxo++) #define for_all_tx_queues(adapter, txo, i) \ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..fb140faeafb1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter) for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_RDY_MASK) - break; - - msleep(1000); - } - - if (i == SLIPORT_READY_TIMEOUT) - return sliport_status ? : -1; - - return 0; -} - -static bool lancer_provisioning_error(struct be_adapter *adapter) -{ - u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; - - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - if (sliport_status & SLIPORT_STATUS_ERR_MASK) { - sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); - sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); - - if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && - sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) - return true; - } - return false; -} - -int lancer_test_and_set_rdy_state(struct be_adapter *adapter) -{ - int status; - u32 sliport_status, err, reset_needed; - bool resource_error; + return 0; - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - return -EAGAIN; + if (sliport_status & SLIPORT_STATUS_ERR_MASK && + !(sliport_status & SLIPORT_STATUS_RN_MASK)) + return -EIO; - status = lancer_wait_ready(adapter); - if (!status) { - sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); - err = sliport_status & SLIPORT_STATUS_ERR_MASK; - reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; - if (err && reset_needed) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - - /* check if adapter has corrected the error */ - status = lancer_wait_ready(adapter); - sliport_status = ioread32(adapter->db + - SLIPORT_STATUS_OFFSET); - sliport_status &= (SLIPORT_STATUS_ERR_MASK | - SLIPORT_STATUS_RN_MASK); - if (status || sliport_status) - status = -1; - } else if (err || reset_needed) { - status = -1; - } + msleep(1000); } - /* Stop error recovery if error is not recoverable. - * No resource error is temporary errors and will go away - * when PF provisions resources. - */ - resource_error = lancer_provisioning_error(adapter); - if (resource_error) - status = -EAGAIN; - return status; + return sliport_status ? 
: -1; } int be_fw_wait_ready(struct be_adapter *adapter) @@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { + /* There's no means to poll POST state on BE2/3 VFs */ + if (BEx_chip(adapter) && be_virtfn(adapter)) + return 0; + stage = be_POST_stage_get(adapter); if (stage == POST_STAGE_ARMFW_RDY) return 0; @@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter) err: dev_err(dev, "POST timeout; stage=%#x\n", stage); - return -1; + return -ETIMEDOUT; } static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) @@ -1902,15 +1849,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, { int num_eqs, i = 0; - if (lancer_chip(adapter) && num > 8) { - while (num) { - num_eqs = min(num, 8); - __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); - i += num_eqs; - num -= num_eqs; - } - } else { - __be_cmd_modify_eqd(adapter, set_eqd, num); + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; } return 0; @@ -1918,7 +1861,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num) + u32 num, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1936,6 +1879,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); + req->hdr.domain = domain; req->interface_id = if_id; req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; @@ -2126,16 +2070,12 @@ int be_cmd_reset_function(struct be_adapter *adapter) int status; if (lancer_chip(adapter)) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); status = lancer_wait_ready(adapter); - if (!status) { - iowrite32(SLI_PORT_CONTROL_IP_MASK, - adapter->db + SLIPORT_CONTROL_OFFSET); - status = lancer_test_and_set_rdy_state(adapter); - } - if (status) { + if (status) dev_err(&adapter->pdev->dev, "Adapter in non recoverable error\n"); - } return status; } @@ -3078,7 +3018,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids - * or one or more true or pseudo permanant mac addresses. + * or one or more true or pseudo permanent mac addresses. * If an active mac_id is present, return first active mac_id * found. 
*/ @@ -3133,7 +3073,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) int status; bool pmac_valid = false; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); if (BEx_chip(adapter)) { if (be_physfn(adapter)) @@ -3634,12 +3574,12 @@ static void be_copy_nic_desc(struct be_resources *res, res->max_rss_qs = le16_to_cpu(desc->rssq_count); res->max_rx_qs = le16_to_cpu(desc->rq_count); res->max_evt_qs = le16_to_cpu(desc->eq_count); + res->max_cq_count = le16_to_cpu(desc->cq_count); + res->max_iface_count = le16_to_cpu(desc->iface_count); + res->max_mcc_count = le16_to_cpu(desc->mcc_count); /* Clear flags that driver is not interested in */ res->if_cap_flags = le32_to_cpu(desc->cap_flags) & BE_IF_CAP_FLAGS_WANT; - /* Need 1 RXQ as the default RXQ */ - if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs) - res->max_rss_qs -= 1; } /* Uses Mbox */ @@ -3701,7 +3641,7 @@ err: /* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 domain) + struct be_resources *res, u8 query, u8 domain) { struct be_cmd_resp_get_profile_config *resp; struct be_cmd_req_get_profile_config *req; @@ -3711,7 +3651,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_nic_res_desc *nic; struct be_mcc_wrb wrb = {0}; struct be_dma_mem cmd; - u32 desc_count; + u16 desc_count; int status; memset(&cmd, 0, sizeof(struct be_dma_mem)); @@ -3730,12 +3670,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the + * descriptors with all bits set to "1" for the fields which can be + * modified using SET_PROFILE_CONFIG cmd. + */ + if (query == RESOURCE_MODIFIABLE) + req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; + status = be_cmd_notify_wait(adapter, &wrb); if (status) goto err; resp = cmd.va; - desc_count = le32_to_cpu(resp->desc_count); + desc_count = le16_to_cpu(resp->desc_count); pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, desc_count); @@ -3860,23 +3807,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 1, version, domain); } +static void be_fill_vf_res_template(struct be_adapter *adapter, + struct be_resources pool_res, + u16 num_vfs, u16 num_vf_qs, + struct be_nic_res_desc *nic_vft) +{ + u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; + struct be_resources res_mod = {0}; + + /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, + * which are modifiable using SET_PROFILE_CONFIG cmd. + */ + be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); + + /* If RSS IFACE capability flags are modifiable for a VF, set the + * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if + * more than 1 RSSQ is available for a VF. + * Otherwise, provision only 1 queue pair for VF. 
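+ *
+ * For example (illustrative): with num_vf_qs == 4, the template below
+ * requests 4 RQs/TXQs/RSSQs per VF with the RSS capability flags set;
+ * with num_vf_qs == 1, the RSS flags are cleared and a single queue
+ * pair is provisioned per VF.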
+ */ + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + if (num_vf_qs > 1) { + vf_if_cap_flags |= BE_IF_FLAGS_RSS; + if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) + vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; + } else { + vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | + BE_IF_FLAGS_DEFQ_RSS); + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); + } else { + num_vf_qs = 1; + } + + nic_vft->rq_count = cpu_to_le16(num_vf_qs); + nic_vft->txq_count = cpu_to_le16(num_vf_qs); + nic_vft->rssq_count = cpu_to_le16(num_vf_qs); + nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / + (num_vfs + 1)); + + /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally + * among the PF and its VFs, if the fields are changeable + */ + if (res_mod.max_uc_mac == FIELD_MODIFIABLE) + nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / + (num_vfs + 1)); + + if (res_mod.max_vlans == FIELD_MODIFIABLE) + nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / + (num_vfs + 1)); + + if (res_mod.max_iface_count == FIELD_MODIFIABLE) + nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / + (num_vfs + 1)); + + if (res_mod.max_mcc_count == FIELD_MODIFIABLE) + nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / + (num_vfs + 1)); +} + int be_cmd_set_sriov_config(struct be_adapter *adapter, - struct be_resources res, u16 num_vfs) + struct be_resources pool_res, u16 num_vfs, + u16 num_vf_qs) { struct { struct be_pcie_res_desc pcie; struct be_nic_res_desc nic_vft; } __packed desc; - u16 vf_q_count; - - if (BEx_chip(adapter) || lancer_chip(adapter)) - return 0; /* PF PCIE descriptor */ be_reset_pcie_desc(&desc.pcie); desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); + desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.pcie.pf_num = adapter->pdev->devfn; desc.pcie.sriov_state = num_vfs ?
1 : 0; desc.pcie.num_vfs = cpu_to_le16(num_vfs); @@ -3885,32 +3889,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter, be_reset_nic_desc(&desc.nic_vft); desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) | - (1 << NOSV_SHIFT); + desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.nic_vft.pf_num = adapter->pdev->devfn; desc.nic_vft.vf_num = 0; - if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) { - /* If number of VFs requested is 8 less than max supported, - * assign 8 queue pairs to the PF and divide the remaining - * resources evenly among the VFs - */ - if (num_vfs < (be_max_vfs(adapter) - 8)) - vf_q_count = (res.max_rss_qs - 8) / num_vfs; - else - vf_q_count = res.max_rss_qs / num_vfs; - - desc.nic_vft.rq_count = cpu_to_le16(vf_q_count); - desc.nic_vft.txq_count = cpu_to_le16(vf_q_count); - desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1); - desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count); - } else { - desc.nic_vft.txq_count = cpu_to_le16(1); - desc.nic_vft.rq_count = cpu_to_le16(1); - desc.nic_vft.rssq_count = cpu_to_le16(0); - /* One CQ for each TX, RX and MCCQ */ - desc.nic_vft.cq_count = cpu_to_le16(3); - } + be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, + &desc.nic_vft); return be_cmd_set_profile_config(adapter, &desc, 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..1ec22300e254 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -588,14 +588,15 @@ enum be_if_flags { BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800, - BE_IF_FLAGS_MULTICAST = 0x1000 + BE_IF_FLAGS_MULTICAST = 0x1000, + BE_IF_FLAGS_DEFQ_RSS = 0x1000000 }; #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ - BE_IF_FLAGS_UNTAGGED) + BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS) #define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \ BE_IF_FLAGS_VLAN_PROMISCUOUS |\ @@ -2021,6 +2022,7 @@ struct be_cmd_req_set_ext_fat_caps { #define PORT_RESOURCE_DESC_TYPE_V1 0x55 #define MAX_RESOURCE_DESC 264 +#define IF_CAPS_FLAGS_VALID_SHIFT 0 /* IF caps valid */ #define VFT_SHIFT 3 /* VF template */ #define IMM_SHIFT 6 /* Immediate */ #define NOSV_SHIFT 7 /* No save */ @@ -2131,20 +2133,28 @@ struct be_cmd_resp_get_func_config { u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; -#define ACTIVE_PROFILE_TYPE 0x2 +enum { + RESOURCE_LIMITS, + RESOURCE_MODIFIABLE +}; + struct be_cmd_req_get_profile_config { struct be_cmd_req_hdr hdr; u8 rsvd; +#define ACTIVE_PROFILE_TYPE 0x2 +#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) u8 type; u16 rsvd1; }; struct be_cmd_resp_get_profile_config { struct be_cmd_resp_hdr hdr; - u32 desc_count; + __le16 desc_count; + u16 rsvd; u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; +#define FIELD_MODIFIABLE 0xFFFF struct be_cmd_req_set_profile_config { struct be_cmd_req_hdr hdr; u32 rsvd; @@ -2256,7 +2266,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, int be_cmd_get_fw_ver(struct be_adapter *adapter); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int 
num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num); + u32 num, u32 domain); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); @@ -2344,7 +2354,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 domain); + struct be_resources *res, u8 query, u8 domain); int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); @@ -2355,4 +2365,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter, int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); int be_cmd_set_sriov_config(struct be_adapter *adapter, - struct be_resources res, u16 num_vfs); + struct be_resources res, u16 num_vfs, + u16 num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 4d2de4700769..b765c24625bf 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1097,7 +1097,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter, return status; if (be_multi_rxq(adapter)) { - for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { + for (j = 0; j < 128; j += adapter->num_rss_qs) { for_all_rss_queues(adapter, rxo, i) { if ((j + i) >= 128) break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..fb0bc3c3620e 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -30,6 +30,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("GPL"); +/* num_vfs module param is obsolete. + * Use sysfs method to enable/disable VFs. 
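+ * For example, via the standard sriov_numvfs sysfs file (illustrative
+ * device path):
+ *
+ *	echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs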
+ */ static unsigned int num_vfs; module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); @@ -727,48 +730,86 @@ static u16 skb_ip_proto(struct sk_buff *skb) ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; } -static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, - struct sk_buff *skb, u32 wrb_cnt, u32 len, - bool skip_hw_vlan) +static inline bool be_is_txq_full(struct be_tx_obj *txo) { - u16 vlan_tag, proto; + return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; +} - memset(hdr, 0, sizeof(*hdr)); +static inline bool be_can_txq_wake(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) < txo->q.len / 2; +} - SET_TX_WRB_HDR_BITS(crc, hdr, 1); +static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; +} + +static void be_get_wrb_params_from_skb(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + u16 proto; if (skb_is_gso(skb)) { - SET_TX_WRB_HDR_BITS(lso, hdr, 1); - SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); + BE_WRB_F_SET(wrb_params->features, LSO, 1); + wrb_params->lso_mss = skb_shinfo(skb)->gso_size; if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) - SET_TX_WRB_HDR_BITS(lso6, hdr, 1); + BE_WRB_F_SET(wrb_params->features, LSO6, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { - SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, IPCS, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) - SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, TCPCS, 1); else if (proto == IPPROTO_UDP) - SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); + BE_WRB_F_SET(wrb_params->features, UDPCS, 1); } if (skb_vlan_tag_present(skb)) { - SET_TX_WRB_HDR_BITS(vlan, hdr, 1); - vlan_tag = be_get_tx_vlan_tag(adapter, skb); - SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); + BE_WRB_F_SET(wrb_params->features, VLAN, 1); + wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); } - SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); - SET_TX_WRB_HDR_BITS(len, hdr, len); + BE_WRB_F_SET(wrb_params->features, CRC, 1); +} + +static void wrb_fill_hdr(struct be_adapter *adapter, + struct be_eth_hdr_wrb *hdr, + struct be_wrb_params *wrb_params, + struct sk_buff *skb) +{ + memset(hdr, 0, sizeof(*hdr)); - /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0 - * When this hack is not needed, the evt bit is set while ringing DB + SET_TX_WRB_HDR_BITS(crc, hdr, + BE_WRB_F_GET(wrb_params->features, CRC)); + SET_TX_WRB_HDR_BITS(ipcs, hdr, + BE_WRB_F_GET(wrb_params->features, IPCS)); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, + BE_WRB_F_GET(wrb_params->features, TCPCS)); + SET_TX_WRB_HDR_BITS(udpcs, hdr, + BE_WRB_F_GET(wrb_params->features, UDPCS)); + + SET_TX_WRB_HDR_BITS(lso, hdr, + BE_WRB_F_GET(wrb_params->features, LSO)); + SET_TX_WRB_HDR_BITS(lso6, hdr, + BE_WRB_F_GET(wrb_params->features, LSO6)); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); + + /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this + * hack is not needed, the evt bit is set while ringing DB. 
*/ - if (skip_hw_vlan) - SET_TX_WRB_HDR_BITS(event, hdr, 1); + SET_TX_WRB_HDR_BITS(event, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); + SET_TX_WRB_HDR_BITS(vlan, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN)); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); + + SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); + SET_TX_WRB_HDR_BITS(len, hdr, skb->len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, @@ -788,77 +829,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, } } -/* Returns the number of WRBs used up by the skb */ +/* Grab a WRB header for xmit */ +static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +{ + u16 head = txo->q.head; + + queue_head_inc(&txo->q); + return head; +} + +/* Set up the WRB header for xmit */ +static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, + struct be_tx_obj *txo, + struct be_wrb_params *wrb_params, + struct sk_buff *skb, u16 head) +{ + u32 num_frags = skb_wrb_cnt(skb); + struct be_queue_info *txq = &txo->q; + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); + + wrb_fill_hdr(adapter, hdr, wrb_params, skb); + be_dws_cpu_to_le(hdr, sizeof(*hdr)); + + BUG_ON(txo->sent_skb_list[head]); + txo->sent_skb_list[head] = skb; + txo->last_req_hdr = head; + atomic_add(num_frags, &txq->used); + txo->last_req_wrb_cnt = num_frags; + txo->pend_wrb_cnt += num_frags; +} + +/* Setup a WRB fragment (buffer descriptor) for xmit */ +static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, + int len) +{ + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + wrb = queue_head_node(txq); + wrb_fill(wrb, busaddr, len); + queue_head_inc(txq); +} + +/* Bring the queue back to the state it was in before be_xmit_enqueue() routine + * was invoked. The producer index is restored to the previous packet and the + * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. + */ +static void be_xmit_restore(struct be_adapter *adapter, + struct be_tx_obj *txo, u16 head, bool map_single, + u32 copied) +{ + struct device *dev; + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + dev = &adapter->pdev->dev; + txq->head = head; + + /* skip the first wrb (hdr); it's not mapped */ + queue_head_inc(txq); + while (copied) { + wrb = queue_head_node(txq); + unmap_tx_frag(dev, wrb, map_single); + map_single = false; + copied -= le32_to_cpu(wrb->frag_len); + queue_head_inc(txq); + } + + txq->head = head; +} + +/* Enqueue the given packet for transmit. This routine allocates WRBs for the + * packet, dma maps the packet buffers and sets up the WRBs. Returns the number + * of WRBs used up by the packet. 
+ */ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, - struct sk_buff *skb, bool skip_hw_vlan) + struct sk_buff *skb, + struct be_wrb_params *wrb_params) { u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); struct device *dev = &adapter->pdev->dev; struct be_queue_info *txq = &txo->q; - struct be_eth_hdr_wrb *hdr; bool map_single = false; - struct be_eth_wrb *wrb; - dma_addr_t busaddr; u16 head = txq->head; + dma_addr_t busaddr; + int len; - hdr = queue_head_node(txq); - wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan); - be_dws_cpu_to_le(hdr, sizeof(*hdr)); - - queue_head_inc(txq); + head = be_tx_get_wrb_hdr(txo); if (skb->len > skb->data_len) { - int len = skb_headlen(skb); + len = skb_headlen(skb); busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, len); - queue_head_inc(txq); + be_tx_setup_wrb_frag(txo, busaddr, len); copied += len; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); - busaddr = skb_frag_dma_map(dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); + busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; - wrb = queue_head_node(txq); - wrb_fill(wrb, busaddr, skb_frag_size(frag)); - queue_head_inc(txq); - copied += skb_frag_size(frag); + be_tx_setup_wrb_frag(txo, busaddr, len); + copied += len; } - BUG_ON(txo->sent_skb_list[head]); - txo->sent_skb_list[head] = skb; - txo->last_req_hdr = head; - atomic_add(wrb_cnt, &txq->used); - txo->last_req_wrb_cnt = wrb_cnt; - txo->pend_wrb_cnt += wrb_cnt; + be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); be_tx_stats_update(txo, skb); return wrb_cnt; dma_err: - /* Bring the queue back to the state it was in before this - * routine was invoked. 
- */ - txq->head = head; - /* skip the first wrb (hdr); it's not mapped */ - queue_head_inc(txq); - while (copied) { - wrb = queue_head_node(txq); - unmap_tx_frag(dev, wrb, map_single); - map_single = false; - copied -= le32_to_cpu(wrb->frag_len); - adapter->drv_stats.dma_map_errors++; - queue_head_inc(txq); - } - txq->head = head; + adapter->drv_stats.dma_map_errors++; + be_xmit_restore(adapter, txo, head, map_single, copied); return 0; } @@ -869,7 +957,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { u16 vlan_tag = 0; @@ -886,8 +975,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } if (vlan_tag) { @@ -905,8 +993,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, vlan_tag); if (unlikely(!skb)) return skb; - if (skip_hw_vlan) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } return skb; @@ -946,7 +1033,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params + *wrb_params) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; unsigned int eth_hdr_len; @@ -970,7 +1058,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_pvid_tagging_enabled(adapter) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - *skip_hw_vlan = true; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); /* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. @@ -978,7 +1066,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_vlan_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1000,7 +1088,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err; } @@ -1014,7 +1102,7 @@ err: static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, - bool *skip_hw_vlan) + struct be_wrb_params *wrb_params) { /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or * less may cause a transmit stall on that port. 
So the work-around is @@ -1026,7 +1114,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, } if (BEx_chip(adapter) || lancer_chip(adapter)) { - skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); + skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); if (!skb) return NULL; } @@ -1060,24 +1148,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) { - bool skip_hw_vlan = false, flush = !skb->xmit_more; struct be_adapter *adapter = netdev_priv(netdev); u16 q_idx = skb_get_queue_mapping(skb); struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; - struct be_queue_info *txq = &txo->q; + struct be_wrb_params wrb_params = { 0 }; + bool flush = !skb->xmit_more; u16 wrb_cnt; - skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + skb = be_xmit_workarounds(adapter, skb, &wrb_params); if (unlikely(!skb)) goto drop; - wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan); + be_get_wrb_params_from_skb(adapter, skb, &wrb_params); + + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) { dev_kfree_skb_any(skb); goto drop; } - if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) { + if (be_is_txq_full(txo)) { netif_stop_subqueue(netdev, q_idx); tx_stats(txo)->tx_stops++; } @@ -1171,7 +1261,7 @@ static int be_vid_config(struct be_adapter *adapter) for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); - status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); if (status) { dev_err(dev, "Setting HW VLAN filtering failed\n"); /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -1380,11 +1470,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + int vf_if_id = vf_cfg->if_handle; + int status; + + /* Enable Transparent VLAN Tagging */ + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); + if (status) + return status; + + /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ + vids[0] = 0; + status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); + if (!status) + dev_info(&adapter->pdev->dev, + "Cleared guest VLANs on VF%d", vf); + + /* After TVT is enabled, disallow VFs to program VLAN filters */ + if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & + ~BE_PRIV_FILTMGMT, vf + 1); + if (!status) + vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; + } + return 0; +} + +static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + struct device *dev = &adapter->pdev->dev; + int status; + + /* Reset Transparent VLAN Tagging. 
*/ + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, + vf_cfg->if_handle, 0); + if (status) + return status; + + /* Allow VFs to program VLAN filtering */ + if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | + BE_PRIV_FILTMGMT, vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); + } + } + + dev_info(dev, + "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); + return 0; +} + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - int status = 0; + int status; if (!sriov_enabled(adapter)) return -EPERM; @@ -1394,24 +1540,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (vlan || qos) { vlan |= qos << VLAN_PRIO_SHIFT; - if (vf_cfg->vlan_tag != vlan) - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - vf_cfg->if_handle, 0); + status = be_set_vf_tvt(adapter, vf, vlan); } else { - /* Reset Transparent Vlan Tagging. */ - status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, - vf + 1, vf_cfg->if_handle, 0); + status = be_clear_vf_tvt(adapter, vf); } if (status) { dev_err(&adapter->pdev->dev, - "VLAN %d config on VF %d failed : %#x\n", vlan, - vf, status); + "VLAN %d config on VF %d failed : %#x\n", vlan, vf, + status); return be_cmd_status(status); } vf_cfg->vlan_tag = vlan; - return 0; } @@ -1981,7 +2122,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) if (rxo->rx_post_starved) rxo->rx_post_starved = false; do { - notify = min(256u, posted); + notify = min(MAX_NUM_POST_ERX_DB, posted); be_rxq_notify(adapter, rxq->id, notify); posted -= notify; } while (posted); @@ -1991,18 +2132,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) } } -static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) +static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) { - struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); + struct be_queue_info *tx_cq = &txo->cq; + struct be_tx_compl_info *txcp = &txo->txcp; + struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); - if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) return NULL; + /* Ensure load ordering of valid bit dword and other dwords below */ rmb(); - be_dws_le_to_cpu(txcp, sizeof(*txcp)); + be_dws_le_to_cpu(compl, sizeof(*compl)); - txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; + txcp->status = GET_TX_COMPL_BITS(status, compl); + txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); + compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; queue_tail_inc(tx_cq); return txcp; } @@ -2123,9 +2269,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter) { u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct device *dev = &adapter->pdev->dev; - struct be_tx_obj *txo; + struct be_tx_compl_info *txcp; struct be_queue_info *txq; - struct be_eth_tx_compl *txcp; + struct be_tx_obj *txo; int i, pending_txqs; /* Stop polling for compls when HW has been silent for 10ms */ @@ -2136,10 +2282,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter) cmpl = 0; num_wrbs = 0; txq = &txo->q; - while ((txcp = be_tx_compl_get(&txo->cq))) { - end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += 
be_tx_compl_process(adapter, txo, - end_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += + be_tx_compl_process(adapter, txo, + txcp->end_index); cmpl++; } if (cmpl) { @@ -2147,7 +2293,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) atomic_sub(num_wrbs, &txq->used); timeo = 0; } - if (atomic_read(&txq->used) == txo->pend_wrb_cnt) + if (!be_is_tx_compl_pending(txo)) pending_txqs--; } @@ -2196,6 +2342,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); } + free_cpumask_var(eqo->affinity_mask); be_queue_free(adapter, &eqo->q); } } @@ -2211,6 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter) adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); napi_hash_add(&eqo->napi); @@ -2302,8 +2454,9 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) static int be_tx_qs_create(struct be_adapter *adapter) { - struct be_queue_info *cq, *eq; + struct be_queue_info *cq; struct be_tx_obj *txo; + struct be_eq_obj *eqo; int status, i; adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); @@ -2321,8 +2474,8 @@ static int be_tx_qs_create(struct be_adapter *adapter) /* If num_evt_qs is less than num_tx_qs, then more than * one txq share an eq */ - eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; - status = be_cmd_cq_create(adapter, cq, eq, false, 3); + eqo = &adapter->eq_obj[i % adapter->num_evt_qs]; + status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3); if (status) return status; @@ -2334,6 +2487,9 @@ static int be_tx_qs_create(struct be_adapter *adapter) status = be_cmd_txq_create(adapter, txo); if (status) return status; + + netif_set_xps_queue(adapter->netdev, eqo->affinity_mask, + eqo->idx); } dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n", @@ -2362,13 +2518,19 @@ static int be_rx_cqs_create(struct be_adapter *adapter) int rc, i; /* We can create as many RSS rings as there are EQs. */ - adapter->num_rx_qs = adapter->num_evt_qs; + adapter->num_rss_qs = adapter->num_evt_qs; + + /* We'll use RSS only if at least 2 RSS rings are supported. */ + if (adapter->num_rss_qs <= 1) + adapter->num_rss_qs = 0; - /* We'll use RSS only if atleast 2 RSS rings are supported. * When RSS is used, we'll need a default RXQ for non-IP traffic.
+ adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; + + /* When the interface is not capable of RSS rings (and there is no + * need to create a default RXQ) we'll still need one RXQ */ - if (adapter->num_rx_qs > 1) - adapter->num_rx_qs++; + if (adapter->num_rx_qs == 0) + adapter->num_rx_qs = 1; adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { @@ -2387,8 +2549,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) } dev_info(&adapter->pdev->dev, - "created %d RSS queue(s) and 1 default RX queue\n", - adapter->num_rx_qs - 1); + "created %d RX queue(s)\n", adapter->num_rx_qs); return 0; } @@ -2498,7 +2659,7 @@ loop_continue: return work_done; } -static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case BE_TX_COMP_HDR_PARSE_ERR: @@ -2513,7 +2674,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) } } -static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) { switch (status) { case LANCER_TX_COMP_LSO_ERR: @@ -2538,22 +2699,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, int idx) { - struct be_eth_tx_compl *txcp; int num_wrbs = 0, work_done = 0; - u32 compl_status; - u16 last_idx; + struct be_tx_compl_info *txcp; - while ((txcp = be_tx_compl_get(&txo->cq))) { - last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); - num_wrbs += be_tx_compl_process(adapter, txo, last_idx); + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); work_done++; - compl_status = GET_TX_COMPL_BITS(status, txcp); - if (compl_status) { + if (txcp->status) { if (lancer_chip(adapter)) - lancer_update_tx_err(txo, compl_status); + lancer_update_tx_err(txo, txcp->status); else - be_update_tx_err(txo, compl_status); + be_update_tx_err(txo, txcp->status); } } @@ -2564,7 +2721,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, /* As Tx wrbs have been freed up, wake up netdev queue * if it was stopped due to lack of tx wrbs. 
*/ if (__netif_subqueue_stopped(adapter->netdev, idx) && - atomic_read(&txo->q.used) < txo->q.len / 2) { + be_can_txq_wake(txo)) { netif_wake_subqueue(adapter->netdev, idx); } @@ -2756,12 +2913,12 @@ void be_detect_error(struct be_adapter *adapter) sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); adapter->hw_error = true; + error_detected = true; /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && sliport_err2 == SLIPORT_ERROR_FW_RESET2) { dev_info(dev, "Firmware update in progress\n"); } else { - error_detected = true; dev_err(dev, "Error detected in the card\n"); dev_err(dev, "ERR: sliport status 0x%x\n", sliport_status); @@ -2772,14 +2929,12 @@ void be_detect_error(struct be_adapter *adapter) } } } else { - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW, &ue_lo); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HIGH, &ue_hi); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); + ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); + ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); + ue_lo_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_LOW_MASK); + ue_hi_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_HI_MASK); ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); @@ -2883,6 +3038,8 @@ static int be_msix_register(struct be_adapter *adapter) status = request_irq(vec, be_msix, 0, eqo->desc, eqo); if (status) goto err_msix; + + irq_set_affinity_hint(vec, eqo->affinity_mask); } return 0; @@ -2927,7 +3084,7 @@ static void be_irq_unregister(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct be_eq_obj *eqo; - int i; + int i, vec; if (!adapter->isr_registered) return; @@ -2939,8 +3096,11 @@ static void be_irq_unregister(struct be_adapter *adapter) } /* MSIx */ - for_all_evt_queues(adapter, eqo, i) - free_irq(be_msix_vec_get(adapter, eqo), eqo); + for_all_evt_queues(adapter, eqo, i) { + vec = be_msix_vec_get(adapter, eqo); + irq_set_affinity_hint(vec, NULL); + free_irq(vec, eqo); + } done: adapter->isr_registered = false; @@ -3022,12 +3182,14 @@ static int be_rx_qs_create(struct be_adapter *adapter) return rc; } - /* The FW would like the default RXQ to be created first */ - rxo = default_rxo(adapter); - rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size, - adapter->if_handle, false, &rxo->rss_id); - if (rc) - return rc; + if (adapter->need_def_rxq || !adapter->num_rss_qs) { + rxo = default_rxo(adapter); + rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, + rx_frag_size, adapter->if_handle, + false, &rxo->rss_id); + if (rc) + return rc; + } for_all_rss_queues(adapter, rxo, i) { rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, @@ -3038,8 +3200,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) } if (be_multi_rxq(adapter)) { - for (j = 0; j < RSS_INDIR_TABLE_LEN; - j += adapter->num_rx_qs - 1) { + for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) { for_all_rss_queues(adapter, rxo, i) { if ((j + i) >= RSS_INDIR_TABLE_LEN) break; @@ -3130,7 +3291,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) int status = 0; u8 mac[ETH_ALEN]; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, @@ -3275,6 +3436,14 @@ static void be_cancel_worker(struct be_adapter *adapter) } 
} +static void be_cancel_err_detection(struct be_adapter *adapter) +{ + if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) { + cancel_delayed_work_sync(&adapter->be_err_detection_work); + adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED; + } +} + static void be_mac_clear(struct be_adapter *adapter) { if (adapter->pmac_id) { @@ -3306,8 +3475,39 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) } #endif +static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) +{ + struct be_resources res = adapter->pool_res; + u16 num_vf_qs = 1; + + /* Distribute the queue resources equally among the PF and its VFs. + * Do not distribute queue resources in multi-channel configuration. + */ + if (num_vfs && !be_is_mc(adapter)) { + /* If number of VFs requested is 8 less than max supported, + * assign 8 queue pairs to the PF and divide the remaining + * resources evenly among the VFs + */ + if (num_vfs < (be_max_vfs(adapter) - 8)) + num_vf_qs = (res.max_rss_qs - 8) / num_vfs; + else + num_vf_qs = res.max_rss_qs / num_vfs; + + /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable + * interfaces per port. Provide RSS on VFs, only if number + * of VFs requested is less than MAX_RSS_IFACES limit. + */ + if (num_vfs >= MAX_RSS_IFACES) + num_vf_qs = 1; + } + return num_vf_qs; +} + static int be_clear(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; + u16 num_vf_qs; + be_cancel_worker(adapter); if (sriov_enabled(adapter)) @@ -3316,9 +3516,14 @@ static int be_clear(struct be_adapter *adapter) /* Re-configure FW to distribute resources evenly across max-supported * number of VFs, only when VFs are not already enabled. */ - if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev)) + if (skyhawk_chip(adapter) && be_physfn(adapter) && + !pci_vfs_assigned(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, + pci_sriov_get_totalvfs(pdev)); be_cmd_set_sriov_config(adapter, adapter->pool_res, - pci_sriov_get_totalvfs(adapter->pdev)); + pci_sriov_get_totalvfs(pdev), + num_vf_qs); + } #ifdef CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); @@ -3339,18 +3544,14 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, u32 cap_flags, u32 vf) { u32 en_flags; - int status; en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS; + BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; en_flags &= cap_flags; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - if_handle, vf); - - return status; + return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); } static int be_vfs_if_create(struct be_adapter *adapter) @@ -3367,9 +3568,15 @@ static int be_vfs_if_create(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { status = be_cmd_get_profile_config(adapter, &res, + RESOURCE_LIMITS, vf + 1); - if (!status) + if (!status) { cap_flags = res.if_cap_flags; + /* Prevent VFs from enabling VLAN promiscuous + * mode + */ + cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } } status = be_if_create(adapter, &vf_cfg->if_handle, @@ -3403,7 +3610,6 @@ static int be_vf_setup(struct be_adapter *adapter) struct device *dev = &adapter->pdev->dev; struct be_vf_cfg *vf_cfg; int status, old_vfs, vf; - u32 privileges; old_vfs = pci_num_vf(adapter->pdev); @@ -3433,15 +3639,18 @@ static int be_vf_setup(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { /* Allow VFs to program MAC/VLAN filters */ - status = be_cmd_get_fn_privileges(adapter,
&privileges, vf + 1); - if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, + vf + 1); + if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { status = be_cmd_set_fn_privileges(adapter, - privileges | + vf_cfg->privileges | BE_PRIV_FILTMGMT, vf + 1); - if (!status) + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; dev_info(dev, "VF%d has FILTMGMT privilege\n", vf); + } } /* Allow full available bandwidth */ @@ -3533,7 +3742,8 @@ static void BEx_get_resources(struct be_adapter *adapter, /* On a SuperNIC profile, the driver needs to use the * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits */ - be_cmd_get_profile_config(adapter, &super_nic_res, 0); + be_cmd_get_profile_config(adapter, &super_nic_res, + RESOURCE_LIMITS, 0); /* Some old versions of BE3 FW don't report max_tx_qs value */ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; } else { @@ -3553,6 +3763,7 @@ static void BEx_get_resources(struct be_adapter *adapter, res->max_evt_qs = 1; res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; + res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS; if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) res->if_cap_flags &= ~BE_IF_FLAGS_RSS; } @@ -3572,13 +3783,12 @@ static void be_setup_init(struct be_adapter *adapter) static int be_get_sriov_config(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; struct be_resources res = {0}; int max_vfs, old_vfs; - /* Some old versions of BE3 FW don't report max_vfs value */ - be_cmd_get_profile_config(adapter, &res, 0); + be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); + /* Some old versions of BE3 FW don't report max_vfs value */ if (BE3_chip(adapter) && !res.max_vfs) { max_vfs = pci_sriov_get_totalvfs(adapter->pdev); res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; @@ -3586,35 +3796,49 @@ static int be_get_sriov_config(struct be_adapter *adapter) adapter->pool_res = res; - if (!be_max_vfs(adapter)) { - if (num_vfs) - dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n"); - adapter->num_vfs = 0; - return 0; - } - - pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter)); - - /* validate num_vfs module param */ + /* If during previous unload of the driver, the VFs were not disabled, + * then we cannot rely on the PF POOL limits for the TotalVFs value. + * Instead use the TotalVFs value stored in the pci-dev struct. + */ old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { - dev_info(dev, "%d VFs are already enabled\n", old_vfs); - if (old_vfs != num_vfs) - dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); + dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n", + old_vfs); + + adapter->pool_res.max_vfs = + pci_sriov_get_totalvfs(adapter->pdev); adapter->num_vfs = old_vfs; - } else { - if (num_vfs > be_max_vfs(adapter)) { - dev_info(dev, "Resources unavailable to init %d VFs\n", - num_vfs); - dev_info(dev, "Limiting to %d VFs\n", - be_max_vfs(adapter)); - } - adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter)); } return 0; } +static void be_alloc_sriov_res(struct be_adapter *adapter) +{ + int old_vfs = pci_num_vf(adapter->pdev); + u16 num_vf_qs; + int status; + + be_get_sriov_config(adapter); + + if (!old_vfs) + pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter)); + + /* When the HW is in SRIOV capable configuration, the PF-pool + * resources are given to PF during driver load, if there are no + * old VFs. This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. 
+ */ + if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { + num_vf_qs = be_calculate_vf_qs(adapter, 0); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, + num_vf_qs); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to optimize SRIOV resources\n"); + } +} + static int be_get_resources(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; @@ -3635,12 +3859,23 @@ static int be_get_resources(struct be_adapter *adapter) if (status) return status; + /* If a default RXQ must be created, we'll use up one RSSQ */ + if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && + !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) + res.max_rss_qs -= 1; + /* If RoCE may be enabled stash away half the EQs for RoCE */ if (be_roce_supported(adapter)) res.max_evt_qs /= 2; adapter->res = res; } + /* If FW supports RSS default queue, then skip creating non-RSS + * queue for non-IP traffic. + */ + adapter->need_def_rxq = (be_if_cap_flags(adapter) & + BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1; + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", be_max_txqs(adapter), be_max_rxqs(adapter), be_max_rss(adapter), be_max_eqs(adapter), @@ -3649,47 +3884,33 @@ static int be_get_resources(struct be_adapter *adapter) be_max_uc(adapter), be_max_mc(adapter), be_max_vlans(adapter)); + /* Sanitize cfg_num_qs based on HW and platform limits */ + adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), + be_max_qs(adapter)); return 0; } -static void be_sriov_config(struct be_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - int status; - - status = be_get_sriov_config(adapter); - if (status) { - dev_err(dev, "Failed to query SR-IOV configuration\n"); - dev_err(dev, "SR-IOV cannot be enabled\n"); - return; - } - - /* When the HW is in SRIOV capable configuration, the PF-pool - * resources are equally distributed across the max-number of - * VFs. The user may request only a subset of the max-vfs to be - * enabled. Based on num_vfs, redistribute the resources across - * num_vfs so that each VF will have access to more number of - * resources. This facility is not available in BE3 FW. - * Also, this is done by FW in Lancer chip. - */ - if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) { - status = be_cmd_set_sriov_config(adapter, - adapter->pool_res, - adapter->num_vfs); - if (status) - dev_err(dev, "Failed to optimize SR-IOV resources\n"); - } -} - static int be_get_config(struct be_adapter *adapter) { + int status, level; u16 profile_id; - int status; + + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; status = be_cmd_query_fw_cfg(adapter); if (status) return status; + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ?
NETIF_MSG_HW : 0; + } + + be_cmd_get_acpi_wol_cap(adapter); + be_cmd_query_port_name(adapter); if (be_physfn(adapter)) { @@ -3699,9 +3920,6 @@ static int be_get_config(struct be_adapter *adapter) "Using profile 0x%x\n", profile_id); } - if (!BE2_chip(adapter) && be_physfn(adapter)) - be_sriov_config(adapter); - status = be_get_resources(adapter); if (status) return status; @@ -3711,9 +3929,6 @@ static int be_get_config(struct be_adapter *adapter) if (!adapter->pmac_id) return -ENOMEM; - /* Sanitize cfg_num_qs based on HW and platform limits */ - adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter)); - return 0; } @@ -3747,6 +3962,13 @@ static void be_schedule_worker(struct be_adapter *adapter) adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } +static void be_schedule_err_detection(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->be_err_detection_work, + msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; +} + static int be_setup_queues(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3829,16 +4051,61 @@ static inline int fw_major_num(const char *fw_ver) return fw_major; } +/* If any VFs are already enabled don't FLR the PF */ +static bool be_reset_required(struct be_adapter *adapter) +{ + return pci_num_vf(adapter->pdev) ? false : true; +} + +/* Wait for the FW to be ready and perform the required initialization */ +static int be_func_init(struct be_adapter *adapter) +{ + int status; + + status = be_fw_wait_ready(adapter); + if (status) + return status; + + if (be_reset_required(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + return status; + + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* We can clear all errors when function reset succeeds */ + be_clear_all_error(adapter); + } + + /* Tell FW we're ready to fire cmds */ + status = be_cmd_fw_init(adapter); + if (status) + return status; + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true); + + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; int status; + status = be_func_init(adapter); + if (status) + return status; + be_setup_init(adapter); if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + if (!BE2_chip(adapter) && be_physfn(adapter)) + be_alloc_sriov_res(adapter); + status = be_get_config(adapter); if (status) goto err; @@ -3879,8 +4146,6 @@ static int be_setup(struct be_adapter *adapter) be_set_rx_mode(adapter->netdev); - be_cmd_get_acpi_wol_cap(adapter); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) @@ -4790,6 +5055,142 @@ static void be_netdev_init(struct net_device *netdev) netdev->ethtool_ops = &be_ethtool_ops; } +static void be_cleanup(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) + be_close(netdev); + rtnl_unlock(); + + be_clear(adapter); +} + +static int be_resume(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + status = be_setup(adapter); + if (status) + return status; + + if (netif_running(netdev)) { + status = be_open(netdev); + if (status) + return status; + } + + netif_device_attach(netdev); + + return 0; +} + +static int be_err_recover(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_resume(adapter); + if (status) + goto err; + + 
dev_info(dev, "Adapter recovery successful\n"); + return 0; +err: + if (be_physfn(adapter)) + dev_err(dev, "Adapter recovery failed\n"); + else + dev_err(dev, "Re-trying adapter recovery\n"); + + return status; +} + +static void be_err_detection_task(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, + be_err_detection_work.work); + int status = 0; + + be_detect_error(adapter); + + if (adapter->hw_error) { + be_cleanup(adapter); + + /* As of now error recovery support is in Lancer only */ + if (lancer_chip(adapter)) + status = be_err_recover(adapter); + } + + /* Always attempt recovery on VFs */ + if (!status || be_virtfn(adapter)) + be_schedule_err_detection(adapter); +} + +static void be_log_sfp_info(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_query_sfp_info(adapter); + if (!status) { + dev_err(&adapter->pdev->dev, + "Unqualified SFP+ detected on %c from %s part no: %s", + adapter->port_name, adapter->phy.vendor_name, + adapter->phy.vendor_pn); + } + adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions + */ + if (!netif_running(adapter->netdev)) { + local_bh_disable(); + be_process_mcc(adapter); + local_bh_enable(); + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + + for_all_rx_queues(adapter, rxo, i) { + /* Replenish RX-queues starved due to memory + * allocation failures. + */ + if (rxo->rx_post_starved) + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); + } + + be_eqd_update(adapter); + + if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + be_log_sfp_info(adapter); + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + static void be_unmap_pci_bars(struct be_adapter *adapter) { if (adapter->csr) @@ -4820,131 +5221,134 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; u8 __iomem *addr; + u32 sli_intf; + + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); + adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> + SLI_INTF_FAMILY_SHIFT; + adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; if (BEx_chip(adapter) && be_physfn(adapter)) { - adapter->csr = pci_iomap(adapter->pdev, 2, 0); + adapter->csr = pci_iomap(pdev, 2, 0); if (!adapter->csr) return -ENOMEM; } - addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); + addr = pci_iomap(pdev, db_bar(adapter), 0); if (!addr) goto pci_map_err; adapter->db = addr; + if (skyhawk_chip(adapter) || BEx_chip(adapter)) { + if (be_physfn(adapter)) { + /* PCICFG is the 2nd BAR in BE2 */ + addr = pci_iomap(pdev, BE2_chip(adapter) ? 
1 : 0, 0); + if (!addr) + goto pci_map_err; + adapter->pcicfg = addr; + } else { + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + } + } + be_roce_map_pci_bars(adapter); return 0; pci_map_err: - dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); + dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } -static void be_ctrl_cleanup(struct be_adapter *adapter) +static void be_drv_cleanup(struct be_adapter *adapter) { struct be_dma_mem *mem = &adapter->mbox_mem_alloced; - - be_unmap_pci_bars(adapter); + struct device *dev = &adapter->pdev->dev; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); mem = &adapter->rx_filter; if (mem->va) - dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, - mem->dma); + dma_free_coherent(dev, mem->size, mem->va, mem->dma); + + mem = &adapter->stats_cmd; + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); } -static int be_ctrl_init(struct be_adapter *adapter) +/* Allocate and initialize various fields in be_adapter struct */ +static int be_drv_init(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; struct be_dma_mem *rx_filter = &adapter->rx_filter; - u32 sli_intf; - int status; - - pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); - adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> - SLI_INTF_FAMILY_SHIFT; - adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; - - status = be_map_pci_bars(adapter); - if (status) - goto done; + struct be_dma_mem *stats_cmd = &adapter->stats_cmd; + struct device *dev = &adapter->pdev->dev; + int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, - mbox_mem_alloc->size, + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); - if (!mbox_mem_alloc->va) { - status = -ENOMEM; - goto unmap_pci_bars; - } + if (!mbox_mem_alloc->va) + return -ENOMEM; + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); - rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, - rx_filter->size, &rx_filter->dma, - GFP_KERNEL); + rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, + &rx_filter->dma, GFP_KERNEL); if (!rx_filter->va) { status = -ENOMEM; goto free_mbox; } + if (lancer_chip(adapter)) + stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats); + else if (BE2_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0); + else if (BE3_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + else + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, + &stats_cmd->dma, GFP_KERNEL); + if (!stats_cmd->va) { + status = -ENOMEM; + goto free_rx_filter; + } + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); - init_completion(&adapter->et_cmd_compl); - pci_save_state(adapter->pdev); - return 0; -free_mbox: - dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, - mbox_mem_alloc->va, mbox_mem_alloc->dma); - 
-unmap_pci_bars: - be_unmap_pci_bars(adapter); - -done: - return status; -} - -static void be_stats_cleanup(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + pci_save_state(adapter->pdev); - if (cmd->va) - dma_free_coherent(&adapter->pdev->dev, cmd->size, - cmd->va, cmd->dma); -} + INIT_DELAYED_WORK(&adapter->work, be_worker); + INIT_DELAYED_WORK(&adapter->be_err_detection_work, + be_err_detection_task); -static int be_stats_init(struct be_adapter *adapter) -{ - struct be_dma_mem *cmd = &adapter->stats_cmd; + adapter->rx_fc = true; + adapter->tx_fc = true; - if (lancer_chip(adapter)) - cmd->size = sizeof(struct lancer_cmd_req_pport_stats); - else if (BE2_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v0); - else if (BE3_chip(adapter)) - cmd->size = sizeof(struct be_cmd_req_get_stats_v1); - else - /* ALL non-BE ASICs */ - cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + /* Must be a power of 2 or else MODULO will BUG_ON */ + adapter->be_get_temp_freq = 64; - cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL); - if (!cmd->va) - return -ENOMEM; return 0; + +free_rx_filter: + dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma); +free_mbox: + dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va, + mbox_mem_alloc->dma); + return status; } static void be_remove(struct pci_dev *pdev) @@ -4957,7 +5361,7 @@ static void be_remove(struct pci_dev *pdev) be_roce_dev_remove(adapter); be_intr_set(adapter, false); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); unregister_netdev(adapter->netdev); @@ -4966,9 +5370,8 @@ static void be_remove(struct pci_dev *pdev) /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); - be_stats_cleanup(adapter); - - be_ctrl_cleanup(adapter); + be_unmap_pci_bars(adapter); + be_drv_cleanup(adapter); pci_disable_pcie_error_reporting(pdev); @@ -4978,156 +5381,6 @@ static void be_remove(struct pci_dev *pdev) free_netdev(adapter->netdev); } -static int be_get_initial_config(struct be_adapter *adapter) -{ - int status, level; - - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - - /* Must be a power of 2 or else MODULO will BUG_ON */ - adapter->be_get_temp_freq = 64; - - if (BEx_chip(adapter)) { - level = be_cmd_get_fw_log_level(adapter); - adapter->msg_enable = - level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; - } - - adapter->cfg_num_qs = netif_get_num_default_rss_queues(); - return 0; -} - -static int lancer_recover_func(struct be_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - int status; - - status = lancer_test_and_set_rdy_state(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) - be_close(adapter->netdev); - - be_clear(adapter); - - be_clear_all_error(adapter); - - status = be_setup(adapter); - if (status) - goto err; - - if (netif_running(adapter->netdev)) { - status = be_open(adapter->netdev); - if (status) - goto err; - } - - dev_err(dev, "Adapter recovery successful\n"); - return 0; -err: - if (status == -EAGAIN) - dev_err(dev, "Waiting for resource provisioning\n"); - else - dev_err(dev, "Adapter recovery failed\n"); - - return status; -} - -static void be_func_recovery_task(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, func_recovery_work.work); - int status = 0; - - be_detect_error(adapter); - - if (adapter->hw_error && lancer_chip(adapter)) { - rtnl_lock(); - netif_device_detach(adapter->netdev); - rtnl_unlock(); - - status = lancer_recover_func(adapter); - if (!status) - netif_device_attach(adapter->netdev); - } - - /* In Lancer, for all errors other than provisioning error (-EAGAIN), - * no need to attempt further recovery. - */ - if (!status || status == -EAGAIN) - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); -} - -static void be_log_sfp_info(struct be_adapter *adapter) -{ - int status; - - status = be_cmd_query_sfp_info(adapter); - if (!status) { - dev_err(&adapter->pdev->dev, - "Unqualified SFP+ detected on %c from %s part no: %s", - adapter->port_name, adapter->phy.vendor_name, - adapter->phy.vendor_pn); - } - adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; -} - -static void be_worker(struct work_struct *work) -{ - struct be_adapter *adapter = - container_of(work, struct be_adapter, work.work); - struct be_rx_obj *rxo; - int i; - - /* when interrupts are not yet enabled, just reap any pending - * mcc completions */ - if (!netif_running(adapter->netdev)) { - local_bh_disable(); - be_process_mcc(adapter); - local_bh_enable(); - goto reschedule; - } - - if (!adapter->stats_cmd_sent) { - if (lancer_chip(adapter)) - lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); - else - be_cmd_get_stats(adapter, &adapter->stats_cmd); - } - - if (be_physfn(adapter) && - MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) - be_cmd_get_die_temperature(adapter); - - for_all_rx_queues(adapter, rxo, i) { - /* Replenish RX-queues starved due to memory - * allocation failures. - */ - if (rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); - } - - be_eqd_update(adapter); - - if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) - be_log_sfp_info(adapter); - -reschedule: - adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); -} - -/* If any VFs are already enabled don't FLR the PF */ -static bool be_reset_required(struct be_adapter *adapter) -{ - return pci_num_vf(adapter->pdev) ? 
false : true; -} - static char *mc_name(struct be_adapter *adapter) { char *str = ""; /* default */ @@ -5226,50 +5479,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) if (!status) dev_info(&pdev->dev, "PCIe error reporting enabled\n"); - status = be_ctrl_init(adapter); + status = be_map_pci_bars(adapter); if (status) goto free_netdev; - /* sync up with fw's ready state */ - if (be_physfn(adapter)) { - status = be_fw_wait_ready(adapter); - if (status) - goto ctrl_clean; - } - - if (be_reset_required(adapter)) { - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - - /* Wait for interrupts to quiesce after an FLR */ - msleep(100); - } - - /* Allow interrupts for other ULPs running on NIC function */ - be_intr_set(adapter, true); - - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); + status = be_drv_init(adapter); if (status) - goto ctrl_clean; - - status = be_stats_init(adapter); - if (status) - goto ctrl_clean; - - status = be_get_initial_config(adapter); - if (status) - goto stats_clean; - - INIT_DELAYED_WORK(&adapter->work, be_worker); - INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); - adapter->rx_fc = true; - adapter->tx_fc = true; + goto unmap_bars; status = be_setup(adapter); if (status) - goto stats_clean; + goto drv_cleanup; be_netdev_init(netdev); status = register_netdev(netdev); @@ -5278,8 +5498,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) be_roce_dev_add(adapter); - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); + be_schedule_err_detection(adapter); dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), func_name(adapter), mc_name(adapter), adapter->port_name); @@ -5288,10 +5507,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) unsetup: be_clear(adapter); -stats_clean: - be_stats_cleanup(adapter); -ctrl_clean: - be_ctrl_cleanup(adapter); +drv_cleanup: + be_drv_cleanup(adapter); +unmap_bars: + be_unmap_pci_bars(adapter); free_netdev: free_netdev(netdev); rel_reg: @@ -5306,21 +5525,14 @@ do_none: static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); - netif_device_detach(netdev); - if (netif_running(netdev)) { - rtnl_lock(); - be_close(netdev); - rtnl_unlock(); - } - be_clear(adapter); + be_cleanup(adapter); pci_save_state(pdev); pci_disable_device(pdev); @@ -5328,13 +5540,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int be_resume(struct pci_dev *pdev) +static int be_pci_resume(struct pci_dev *pdev) { - int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); + int status = 0; status = pci_enable_device(pdev); if (status) @@ -5343,30 +5552,11 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - status = be_fw_wait_ready(adapter); + status = be_resume(adapter); if (status) return status; - status = be_cmd_reset_function(adapter); - if (status) - return status; - - be_intr_set(adapter, true); - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); - if (status) - return status; - 
- be_setup(adapter); - if (netif_running(netdev)) { - rtnl_lock(); - be_open(netdev); - rtnl_unlock(); - } - - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); + be_schedule_err_detection(adapter); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5386,7 +5576,7 @@ static void be_shutdown(struct pci_dev *pdev) be_roce_dev_shutdown(adapter); cancel_delayed_work_sync(&adapter->work); - cancel_delayed_work_sync(&adapter->func_recovery_work); + be_cancel_err_detection(adapter); netif_device_detach(adapter->netdev); @@ -5399,22 +5589,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_err(&adapter->pdev->dev, "EEH error detected\n"); if (!adapter->eeh_error) { adapter->eeh_error = true; - cancel_delayed_work_sync(&adapter->func_recovery_work); - - rtnl_lock(); - netif_device_detach(netdev); - if (netif_running(netdev)) - be_close(netdev); - rtnl_unlock(); + be_cancel_err_detection(adapter); - be_clear(adapter); + be_cleanup(adapter); } if (state == pci_channel_io_perm_failure) @@ -5465,43 +5648,73 @@ static void be_eeh_resume(struct pci_dev *pdev) { int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; dev_info(&adapter->pdev->dev, "EEH resume\n"); pci_save_state(pdev); - status = be_cmd_reset_function(adapter); + status = be_resume(adapter); if (status) goto err; - /* On some BE3 FW versions, after a HW reset, - * interrupts will remain disabled for each function. - * So, explicitly enable interrupts + be_schedule_err_detection(adapter); + return; +err: + dev_err(&adapter->pdev->dev, "EEH resume failed\n"); +} + +static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + u16 num_vf_qs; + int status; + + if (!num_vfs) + be_vf_clear(adapter); + + adapter->num_vfs = num_vfs; + + if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot disable VFs while they are assigned\n"); + return -EBUSY; + } + + /* When the HW is in SRIOV capable configuration, the PF-pool resources + * are equally distributed across the max-number of VFs. The user may + * request only a subset of the max-vfs to be enabled. + * Based on num_vfs, redistribute the resources across num_vfs so that + * each VF will have access to more number of resources. + * This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. 
*/ - be_intr_set(adapter, true); + if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, + adapter->num_vfs, num_vf_qs); + if (status) + dev_err(&pdev->dev, + "Failed to optimize SR-IOV resources\n"); + } - /* tell fw we're ready to fire cmds */ - status = be_cmd_fw_init(adapter); + status = be_get_resources(adapter); if (status) - goto err; + return be_cmd_status(status); - status = be_setup(adapter); + /* Updating real_num_tx/rx_queues() requires rtnl_lock() */ + rtnl_lock(); + status = be_update_queues(adapter); + rtnl_unlock(); if (status) - goto err; + return be_cmd_status(status); - if (netif_running(netdev)) { - status = be_open(netdev); - if (status) - goto err; - } + if (adapter->num_vfs) + status = be_vf_setup(adapter); - schedule_delayed_work(&adapter->func_recovery_work, - msecs_to_jiffies(1000)); - netif_device_attach(netdev); - return; -err: - dev_err(&adapter->pdev->dev, "EEH resume failed\n"); + if (!status) + return adapter->num_vfs; + + return 0; } static const struct pci_error_handlers be_eeh_handlers = { @@ -5516,8 +5729,9 @@ static struct pci_driver be_driver = { .probe = be_probe, .remove = be_remove, .suspend = be_suspend, - .resume = be_resume, + .resume = be_pci_resume, .shutdown = be_shutdown, + .sriov_configure = be_pci_sriov_configure, .err_handler = &be_eeh_handlers }; @@ -5531,6 +5745,11 @@ static int __init be_init_module(void) rx_frag_size = 2048; } + if (num_vfs > 0) { + pr_info(DRV_NAME " : Module param num_vfs is obsolete."); + pr_info(DRV_NAME " : Use sysfs method to enable VFs\n"); + } + return pci_register_driver(&be_driver); } module_init(be_init_module); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f88cfaa359e7..442410cd2ca4 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1299,7 +1299,7 @@ static int ethoc_resume(struct platform_device *pdev) # define ethoc_resume NULL #endif -static struct of_device_id ethoc_match[] = { +static const struct of_device_id ethoc_match[] = { { .compatible = "opencores,ethoc", }, {}, }; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ba84c4a9ce32..25e3425729d0 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -58,14 +58,12 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" config FSL_PQ_MDIO tristate "Freescale PQ MDIO" - depends on FSL_SOC select PHYLIB ---help--- This driver supports the MDIO bus used by the gianfar and UCC drivers. 
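An editorial aside on the be2net changes above, before the Freescale MDIO Kconfig entries continue: the driver retires its num_vfs module parameter in favour of the generic PCI sriov_configure hook, which the PCI core invokes whenever userspace writes to the sriov_numvfs sysfs attribute. The sketch below is the editor's minimal illustration of that pattern, not be2net code; the foo_* names are hypothetical, and a real callback would also repartition device resources the way be_pci_sriov_configure() does.

	#include <linux/pci.h>

	static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
	{
		int rc;

		if (num_vfs == 0) {
			/* refuse teardown while a guest still owns a VF */
			if (pci_vfs_assigned(pdev))
				return -EBUSY;
			pci_disable_sriov(pdev);
			return 0;
		}

		rc = pci_enable_sriov(pdev, num_vfs);
		if (rc)
			return rc;

		/* the core expects the count of VFs actually enabled */
		return num_vfs;
	}

	static struct pci_driver foo_driver = {
		.name		  = "foo",
		.sriov_configure  = foo_sriov_configure,
		/* .id_table, .probe and .remove elided */
	};

With such a hook in place, VFs are toggled at runtime, e.g. echo 8 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs (device address hypothetical), which is the "sysfs method" the be_init_module() message above points users to.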
config FSL_XGMAC_MDIO tristate "Freescale XGMAC MDIO" - depends on FSL_SOC select PHYLIB select OF_MDIO ---help--- diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..f6a3a7abd468 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1189,13 +1189,12 @@ static void fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; - struct bufdesc *bdp, *bdp_t; + struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct netdev_queue *nq; int index = 0; - int i, bdnum; int entries_free; fep = netdev_priv(ndev); @@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) if (bdp == txq->cur_tx) break; - bdp_t = bdp; - bdnum = 1; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - while (!skb) { - bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - bdnum++; - } - if (skb_shinfo(skb)->nr_frags && - (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) - break; + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - for (i = 0; i < bdnum; i++) { - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - if (i < bdnum - 1) - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - } + skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + continue; + } /* Check for errors. */ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) vlan_packet_rcvd = true; - skb_copy_to_linear_data_offset(skb, VLAN_HLEN, - data, (2 * ETH_ALEN)); + memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } @@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id) writel(int_events, fep->hwp + FEC_IEVENT); fec_enet_collect_events(fep, int_events); - if (fep->work_tx || fep->work_rx) { + if ((fep->work_tx || fep->work_rx) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -1967,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *node; int err = -ENXIO, i; + u32 mii_speed, holdtime; /* * The i.MX28 dual fec interfaces are not equal. @@ -2004,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev) * Reference Manual has an error on this, and gets fixed on i.MX6Q * document. */ - fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); + mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); if (fep->quirks & FEC_QUIRK_ENET_MAC) - fep->phy_speed--; - fep->phy_speed <<= 1; + mii_speed--; + if (mii_speed > 63) { + dev_err(&pdev->dev, + "fec clock (%lu) too fast to get the right mii speed\n", + clk_get_rate(fep->clk_ipg)); + err = -EINVAL; + goto err_out; + } + + /* + * The i.MX28 and i.MX6 types have another field in the MSCR (aka + * MII_SPEED) register that defines the MDIO output hold time. Earlier + * versions are RAZ there, so just ignore the difference and write the + * register always.
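 * (Worked example, the editor's illustration: with a 66 MHz ipg clock,
 * mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14, well below the limit of
 * 63 checked above, and holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0,
 * i.e. one clock cycle, roughly 15 ns, of output hold.)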
+ * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. + * HOLDTIME + 1 is the number of clk cycles the fec is holding the + * output. + * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). + * Given that ceil(clkrate / 5000000) <= 64, the calculation for + * holdtime cannot result in a value greater than 3. + */ + holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; + + fep->phy_speed = mii_speed << 1 | holdtime << 8; + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); fep->mii_bus = mdiobus_alloc(); @@ -3383,7 +3394,6 @@ fec_drv_remove(struct platform_device *pdev) regulator_disable(fep->reg_phy); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - fec_enet_clk_enable(ndev, false); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index f495796248db..afe7f39cdd7c 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) } #endif -static struct of_device_id mpc52xx_fec_match[] = { +static const struct of_device_id mpc52xx_fec_match[] = { { .compatible = "fsl,mpc5200b-fec", }, { .compatible = "fsl,mpc5200-fec", }, { .compatible = "mpc5200-fec", }, diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index e0528900db02..1e647beaf989 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) return 0; } -static struct of_device_id mpc52xx_fec_mdio_match[] = { +static const struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "fsl,mpc5200b-mdio", }, { .compatible = "fsl,mpc5200-mdio", }, { .compatible = "mpc5200b-fec-phy", }, diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 1f9cf2345266..a583d89b13c4 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -136,7 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) */ writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); - /* It is recommended to doulbe check the TMODE field in the + /* It is recommended to double check the TMODE field in the * TCSR register to be cleared before the first compare counter * is written into TCCR register. Just add a double check. */ @@ -390,20 +390,18 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. */ -static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct fec_enet_private *adapter = container_of(ptp, struct fec_enet_private, ptp_caps); u64 ns; - u32 remainder; unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -417,7 +415,7 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) * wall timer value.
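 * (Editorial aside, not part of the patch: the timespec to timespec64
 * conversions in this file, and in gianfar_ptp.c further below, move the
 * driver onto the y2038-safe gettime64/settime64 PTP callbacks;
 * ns_to_timespec64() and timespec64_to_ns() replace the open-coded
 * div_u64_rem() and multiply-and-add conversions.)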
*/ static int fec_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); @@ -433,8 +431,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, return -EINVAL; } - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); /* Get the timer value based on timestamp. * Update the counter with the masked value. */ @@ -584,8 +581,8 @@ void fec_ptp_init(struct platform_device *pdev) fep->ptp_caps.pps = 1; fep->ptp_caps.adjfreq = fec_ptp_adjfreq; fep->ptp_caps.adjtime = fec_ptp_adjtime; - fep->ptp_caps.gettime = fec_ptp_gettime; - fep->ptp_caps.settime = fec_ptp_settime; + fep->ptp_caps.gettime64 = fec_ptp_gettime; + fep->ptp_caps.settime64 = fec_ptp_settime; fep->ptp_caps.enable = fec_ptp_enable; fep->cycle_speed = clk_get_rate(fep->clk_ptp); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index a17628769a1f..9b3639eae676 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -916,7 +916,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; -static struct of_device_id fs_enet_match[]; +static const struct of_device_id fs_enet_match[]; static int fs_enet_probe(struct platform_device *ofdev) { const struct of_device_id *match; @@ -1082,7 +1082,7 @@ static int fs_enet_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_match[] = { +static const struct of_device_id fs_enet_match[] = { #ifdef CONFIG_FS_ENET_HAS_SCC { .compatible = "fsl,cpm1-scc-enet", diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 1d5617d2d8bd..68a428de0bc0 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_mdio_bb_match[] = { +static const struct of_device_id fs_enet_mdio_bb_match[] = { { .compatible = "fsl,cpm2-mdio-bitbang", }, diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 1648e3582500..2be383e6d258 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static struct of_device_id fs_enet_mdio_fec_match[]; +static const struct of_device_id fs_enet_mdio_fec_match[]; static int fs_enet_mdio_probe(struct platform_device *ofdev) { const struct of_device_id *match; @@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_mdio_fec_match[] = { +static const struct of_device_id fs_enet_mdio_fec_match[] = { { .compatible = "fsl,pq1-fec-mdio", }, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index d1a91e344e6b..3c40f6b99224 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) #endif -static struct of_device_id fsl_pq_mdio_match[] = { +static const struct of_device_id fsl_pq_mdio_match[] = { #if 
defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) { .compatible = "fsl,gianfar-tbi", diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 43df78882e48..4ee080d49bc0 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, { u32 lstatus; - bdp->bufPtr = buf; + bdp->bufPtr = cpu_to_be32(buf); lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) @@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, gfar_wmb(); - bdp->lstatus = lstatus; + bdp->lstatus = cpu_to_be32(lstatus); } static int gfar_init_bds(struct net_device *ndev) @@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev) /* Set the last descriptor in the ring to indicate wrap */ txbdp--; - txbdp->status |= TXBD_WRAP; + txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | + TXBD_WRAP); } rfbptr = &regs->rfbptr0; @@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev) struct sk_buff *skb = rx_queue->rx_skbuff[j]; if (skb) { - bufaddr = rxbdp->bufPtr; + bufaddr = be32_to_cpu(rxbdp->bufPtr); } else { skb = gfar_new_skb(ndev, &bufaddr); if (!skb) { @@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np, grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { - u32 *rxq_mask, *txq_mask; - rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); - txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); + u32 rxq_mask, txq_mask; + int ret; + + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + + ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); + if (!ret) { + grp->rx_bit_map = rxq_mask ? + rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } + + ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); + if (!ret) { + grp->tx_bit_map = txq_mask ? + txq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } if (priv->poll_mode == GFAR_SQ_POLLING) { /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - } else { /* GFAR_MQ_POLLING */ - grp->rx_bit_map = rxq_mask ? - *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); - grp->tx_bit_map = txq_mask ?
- *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); } } else { grp->rx_bit_map = 0xFF; @@ -747,6 +757,18 @@ static int gfar_parse_group(struct device_node *np, return 0; } +static int gfar_of_group_count(struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (!of_node_cmp(child->name, "queue-group")) + num++; + + return num; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; @@ -757,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) struct gfar_private *priv = NULL; struct device_node *np = ofdev->dev.of_node; struct device_node *child = NULL; - const u32 *stash; - const u32 *stash_len; - const u32 *stash_idx; + struct property *stash; + u32 stash_len = 0; + u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; - u32 *tx_queues, *rx_queues; unsigned short mode, poll_mode; if (!np) @@ -775,16 +796,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) poll_mode = GFAR_SQ_POLLING; } - /* parse the num of HW tx and rx queues */ - tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); - rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); - if (mode == SQ_SG_MODE) { num_tx_qs = 1; num_rx_qs = 1; } else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ - unsigned int num_grps = of_get_available_child_count(np); + unsigned int num_grps = gfar_of_group_count(np); if (num_grps == 0 || num_grps > MAXGROUPS) { dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", @@ -797,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_tx_qs = num_grps; /* one txq per int group */ num_rx_qs = num_grps; /* one rxq per int group */ } else { /* GFAR_MQ_POLLING */ - num_tx_qs = tx_queues ? *tx_queues : 1; - num_rx_qs = rx_queues ? *rx_queues : 1; + u32 tx_queues, rx_queues; + int ret; + + /* parse the num of HW tx and rx queues */ + ret = of_property_read_u32(np, "fsl,num_tx_queues", + &tx_queues); + num_tx_qs = ret ? 1 : tx_queues; + + ret = of_property_read_u32(np, "fsl,num_rx_queues", + &rx_queues); + num_rx_qs = ret ? 
1 : rx_queues; } } @@ -839,19 +865,26 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (err) goto rx_alloc_failed; + err = of_property_read_string(np, "model", &model); + if (err) { + pr_err("Device model property missing, aborting\n"); + goto rx_alloc_failed; + } + /* Init Rx queue filer rule set linked list */ INIT_LIST_HEAD(&priv->rx_list.list); priv->rx_list.count = 0; mutex_init(&priv->rx_queue_access); - model = of_get_property(np, "model", NULL); - for (i = 0; i < MAXGROUPS; i++) priv->gfargrp[i].regs = NULL; /* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) { - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "queue-group")) + continue; + err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; @@ -862,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) goto err_grp_init; } - stash = of_get_property(np, "bd-stash", NULL); + stash = of_find_property(np, "bd-stash", NULL); if (stash) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } - stash_len = of_get_property(np, "rx-stash-len", NULL); + err = of_property_read_u32(np, "rx-stash-len", &stash_len); - if (stash_len) - priv->rx_stash_size = *stash_len; + if (err == 0) + priv->rx_stash_size = stash_len; - stash_idx = of_get_property(np, "rx-stash-idx", NULL); + err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); - if (stash_idx) - priv->rx_stash_index = *stash_idx; + if (err == 0) + priv->rx_stash_index = stash_idx; if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; @@ -904,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | FSL_GIANFAR_DEV_HAS_TIMER; - ctype = of_get_property(np, "phy-connection-type", NULL); + err = of_property_read_string(np, "phy-connection-type", &ctype); /* We only care about rgmii-id. 
The rest are autodetected */ - if (ctype && !strcmp(ctype, "rgmii-id")) + if (err == 0 && !strcmp(ctype, "rgmii-id")) priv->interface = PHY_INTERFACE_MODE_RGMII_ID; else priv->interface = PHY_INTERFACE_MODE_MII; - if (of_get_property(np, "fsl,magic-packet", NULL)) + if (of_find_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -1869,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) if (!tx_queue->tx_skbuff[i]) continue; - dma_unmap_single(priv->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), DMA_TO_DEVICE); txbdp->lstatus = 0; for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { txbdp++; - dma_unmap_page(priv->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); } txbdp++; dev_kfree_skb_any(tx_queue->tx_skbuff[i]); @@ -1896,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) for (i = 0; i < rx_queue->rx_ring_size; i++) { if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, rxbdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), priv->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_queue->rx_skbuff[i]); @@ -2152,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, */ if (ip_hdr(skb)->protocol == IPPROTO_UDP) { flags |= TXFCB_UDP; - fcb->phcs = udp_hdr(skb)->check; + fcb->phcs = (__force __be16)(udp_hdr(skb)->check); } else - fcb->phcs = tcp_hdr(skb)->check; + fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); /* l3os is the distance between the start of the * frame (skb->data) and the start of the IP hdr. 
* l4os is the distance between the start of the * l3 hdr and the l4 hdr */ - fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); + fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); fcb->l4os = skb_network_header_len(skb); fcb->flags = flags; @@ -2170,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) { fcb->flags |= TXFCB_VLN; - fcb->vlctl = skb_vlan_tag_get(skb); + fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); } static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, @@ -2283,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; - lstatus = txbdp->lstatus; + lstatus = be32_to_cpu(txbdp->lstatus); /* Time stamp insertion requires one additional TxBD */ if (unlikely(do_tstamp)) @@ -2291,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->tx_ring_size); if (nr_frags == 0) { - if (unlikely(do_tstamp)) - txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | - TXBD_INTERRUPT); - else + if (unlikely(do_tstamp)) { + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); + } else { lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } } else { /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { @@ -2305,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) frag_len = skb_shinfo(skb)->frags[i].size; - lstatus = txbdp->lstatus | frag_len | + lstatus = be32_to_cpu(txbdp->lstatus) | frag_len | BD_LFLAG(TXBD_READY); /* Handle the last BD specially */ @@ -2321,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; /* set the TxBD length and buffer pointer */ - txbdp->bufPtr = bufaddr; - txbdp->lstatus = lstatus; + txbdp->bufPtr = cpu_to_be32(bufaddr); + txbdp->lstatus = cpu_to_be32(lstatus); } - lstatus = txbdp_start->lstatus; + lstatus = be32_to_cpu(txbdp_start->lstatus); } /* Add TxPAL between FCB and frame if required */ @@ -2373,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(dma_mapping_error(priv->dev, bufaddr))) goto dma_map_err; - txbdp_start->bufPtr = bufaddr; + txbdp_start->bufPtr = cpu_to_be32(bufaddr); /* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of @@ -2381,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) * the full frame length. 
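 * (Editor's note on the conversions throughout this file, stated as an
 * assumption: the eTSEC buffer descriptor rings are big-endian in memory
 * regardless of CPU endianness, so every read-modify-write of
 * lstatus/bufPtr/length now goes through be32_to_cpu()/cpu_to_be32() and
 * their be16 counterparts, matching the __be16/__be32 annotations added to
 * struct txbd8/rxbd8 in gianfar.h below and letting little-endian hosts
 * drive the same rings.)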
*/ if (unlikely(do_tstamp)) { - txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; - txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | - (skb_headlen(skb) - fcb_len); + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + bufaddr = be32_to_cpu(txbdp_start->bufPtr); + bufaddr += fcb_len; + lstatus_ts |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - fcb_len); + + txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); @@ -2406,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) gfar_wmb(); - txbdp_start->lstatus = lstatus; + txbdp_start->lstatus = cpu_to_be32(lstatus); gfar_wmb(); /* force lstatus write before tx_skbuff */ @@ -2445,13 +2488,14 @@ dma_map_err: if (do_tstamp) txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); for (i = 0; i < nr_frags; i++) { - lstatus = txbdp->lstatus; + lstatus = be32_to_cpu(txbdp->lstatus); if (!(lstatus & BD_LFLAG(TXBD_READY))) break; - txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY); - bufaddr = txbdp->bufPtr; - dma_unmap_page(priv->dev, bufaddr, txbdp->length, + lstatus &= ~BD_LFLAG(TXBD_READY); + txbdp->lstatus = cpu_to_be32(lstatus); + bufaddr = be32_to_cpu(txbdp->bufPtr); + dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), DMA_TO_DEVICE); txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); } @@ -2592,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); - lstatus = lbdp->lstatus; + lstatus = be32_to_cpu(lbdp->lstatus); /* Only clean completed frames */ if ((lstatus & BD_LFLAG(TXBD_READY)) && @@ -2601,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { next = next_txbd(bdp, base, tx_ring_size); - buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; + buflen = be16_to_cpu(next->length) + + GMAC_FCB_LEN + GMAC_TXPAL_LEN; } else - buflen = bdp->length; + buflen = be16_to_cpu(bdp->length); - dma_unmap_single(priv->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), buflen, DMA_TO_DEVICE); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { @@ -2616,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) shhwtstamps.hwtstamp = ns_to_ktime(*ns); skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); skb_tstamp_tx(skb, &shhwtstamps); - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + gfar_clear_txbd_status(bdp); bdp = next; } - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + gfar_clear_txbd_status(bdp); bdp = next_txbd(bdp, base, tx_ring_size); for (i = 0; i < frags; i++) { - dma_unmap_page(priv->dev, bdp->bufPtr, - bdp->length, DMA_TO_DEVICE); - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), + be16_to_cpu(bdp->length), + DMA_TO_DEVICE); + gfar_clear_txbd_status(bdp); bdp = next_txbd(bdp, base, tx_ring_size); } @@ -2783,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) * were verified, then we tell the kernel that no * checksumming is necessary. 
Otherwise, it is [FIXME] */ - if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) + if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == + (RXFCB_CIP | RXFCB_CTU)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } - /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi) @@ -2831,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, * RXFCB_VLN is pseudo randomly set. */ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && - fcb->flags & RXFCB_VLN) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); + be16_to_cpu(fcb->flags) & RXFCB_VLN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(fcb->vlctl)); /* Send the packet up the stack */ napi_gro_receive(napi, skb); @@ -2859,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; - while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { + while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { struct sk_buff *newskb; dma_addr_t bufaddr; @@ -2870,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; - dma_unmap_single(priv->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(!(bdp->status & RXBD_ERR) && - bdp->length > priv->rx_buffer_size)) - bdp->status = RXBD_LARGE; + if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && + be16_to_cpu(bdp->length) > priv->rx_buffer_size)) + bdp->status = cpu_to_be16(RXBD_LARGE); /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || - bdp->status & RXBD_ERR)) { - count_errors(bdp->status, dev); + if (unlikely(!newskb || + !(be16_to_cpu(bdp->status) & RXBD_LAST) || + be16_to_cpu(bdp->status) & RXBD_ERR)) { + count_errors(be16_to_cpu(bdp->status), dev); if (unlikely(!newskb)) { newskb = skb; - bufaddr = bdp->bufPtr; + bufaddr = be32_to_cpu(bdp->bufPtr); } else if (skb) dev_kfree_skb(skb); } else { @@ -2893,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) howmany++; if (likely(skb)) { - pkt_len = bdp->length - ETH_FCS_LEN; + pkt_len = be16_to_cpu(bdp->length) - + ETH_FCS_LEN; /* Remove the FCS from the packet length */ skb_put(skb, pkt_len); rx_queue->stats.rx_bytes += pkt_len; @@ -3162,8 +3211,8 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; if (unlikely(phydev->link != priv->oldlink || - phydev->duplex != priv->oldduplex || - phydev->speed != priv->oldspeed)) + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) gfar_update_link_state(priv); } @@ -3545,7 +3594,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) phy_print_status(phydev); } -static struct of_device_id gfar_match[] = +static const struct of_device_id gfar_match[] = { { .type = "network", diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 9e1802400c23..daa1d37de642 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -544,12 +544,12 @@ struct txbd8 { union { struct { - u16 status; /* Status Fields */ - u16 length; /* Buffer length */ + __be16 
status; /* Status Fields */ + __be16 length; /* Buffer length */ }; - u32 lstatus; + __be32 lstatus; }; - u32 bufPtr; /* Buffer Pointer */ + __be32 bufPtr; /* Buffer Pointer */ }; struct txfcb { @@ -557,28 +557,28 @@ struct txfcb { u8 ptp; /* Flag to enable tx timestamping */ u8 l4os; /* Level 4 Header Offset */ u8 l3os; /* Level 3 Header Offset */ - u16 phcs; /* Pseudo-header Checksum */ - u16 vlctl; /* VLAN control word */ + __be16 phcs; /* Pseudo-header Checksum */ + __be16 vlctl; /* VLAN control word */ }; struct rxbd8 { union { struct { - u16 status; /* Status Fields */ - u16 length; /* Buffer Length */ + __be16 status; /* Status Fields */ + __be16 length; /* Buffer Length */ }; - u32 lstatus; + __be32 lstatus; }; - u32 bufPtr; /* Buffer Pointer */ + __be32 bufPtr; /* Buffer Pointer */ }; struct rxfcb { - u16 flags; + __be16 flags; u8 rq; /* Receive Queue index */ u8 pro; /* Layer 4 Protocol */ u16 reserved; - u16 vlctl; /* VLAN control word */ + __be16 vlctl; /* VLAN control word */ }; struct gianfar_skb_cb { @@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void) #endif } +static inline void gfar_clear_txbd_status(struct txbd8 *bdp) +{ + u32 lstatus = be32_to_cpu(bdp->lstatus); + + lstatus &= BD_LFLAG(TXBD_WRAP); + bdp->lstatus = cpu_to_be32(lstatus); +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 16826341a4c9..8e3cd77aa347 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -322,10 +322,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct etsects *etsects = container_of(ptp, struct etsects, caps); @@ -335,20 +335,19 @@ static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts) spin_unlock_irqrestore(&etsects->lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); + return 0; } static int ptp_gianfar_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { u64 ns; unsigned long flags; struct etsects *etsects = container_of(ptp, struct etsects, caps); - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(&etsects->lock, flags); @@ -418,8 +417,8 @@ static struct ptp_clock_info ptp_gianfar_caps = { .pps = 1, .adjfreq = ptp_gianfar_adjfreq, .adjtime = ptp_gianfar_adjtime, - .gettime = ptp_gianfar_gettime, - .settime = ptp_gianfar_settime, + .gettime64 = ptp_gianfar_gettime, + .settime64 = ptp_gianfar_settime, .enable = ptp_gianfar_enable, }; @@ -440,7 +439,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; struct etsects *etsects; - struct timespec now; + struct timespec64 now; int err = -ENOMEM; u32 tmr_ctrl; unsigned long flags; @@ -495,7 +494,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } - getnstimeofday(&now); + getnstimeofday64(&now); ptp_gianfar_settime(&etsects->caps, &now); tmr_ctrl = @@ -554,7 +553,7 @@ static int gianfar_ptp_remove(struct platform_device 
*dev) return 0; } -static struct of_device_id match_table[] = { +static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, {}, }; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 357e8b576905..4dd40e057f40 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) ugeth->phy_interface = phy_interface; ugeth->max_speed = max_speed; + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(dev); + err = register_netdev(dev); if (err) { if (netif_msg_probe(ugeth)) @@ -3930,7 +3933,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) return 0; } -static struct of_device_id ucc_geth_match[] = { +static const struct of_device_id ucc_geth_match[] = { { .type = "network", .compatible = "ucc_geth", diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 3a83bc2c613c..7b8fe866f603 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -46,17 +46,43 @@ struct tgec_mdio_controller { #define MDIO_DATA(x) (x & 0xffff) #define MDIO_DATA_BSY BIT(31) +struct mdio_fsl_priv { + struct tgec_mdio_controller __iomem *mdio_base; + bool is_little_endian; +}; + +static u32 xgmac_read32(void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + return ioread32(regs); + else + return ioread32be(regs); +} + +static void xgmac_write32(u32 value, + void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + iowrite32(value, regs); + else + iowrite32be(value, regs); +} + /* * Wait until the MDIO bus is free */ static int xgmac_wait_until_free(struct device *dev, - struct tgec_mdio_controller __iomem *regs) + struct tgec_mdio_controller __iomem *regs, + bool is_little_endian) { unsigned int timeout; /* Wait till the bus is free */ timeout = TIMEOUT; - while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) { + while ((xgmac_read32(&regs->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } @@ -73,13 +99,15 @@ static int xgmac_wait_until_free(struct device *dev, * Wait till the MDIO read or write operation is complete */ static int xgmac_wait_until_done(struct device *dev, - struct tgec_mdio_controller __iomem *regs) + struct tgec_mdio_controller __iomem *regs, + bool is_little_endian) { unsigned int timeout; /* Wait till the MDIO write is complete */ timeout = TIMEOUT; - while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) { + while ((xgmac_read32(&regs->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } @@ -99,12 +127,14 @@ static int xgmac_wait_until_done(struct device *dev, */ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { - struct tgec_mdio_controller __iomem *regs = bus->priv; + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; u32 mdio_ctl, mdio_stat; int ret; + bool endian = priv->is_little_endian; - mdio_stat = ioread32be(&regs->mdio_stat); + mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { /* Clause 45 (ie 10G) */ dev_addr = (regnum >> 16) & 0x1f; @@ -115,29 +145,29 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val mdio_stat &= ~MDIO_STAT_ENC; } - iowrite32be(mdio_stat,
&regs->mdio_stat); + xgmac_write32(mdio_stat, &regs->mdio_stat, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the port and dev addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - iowrite32be(mdio_ctl, &regs->mdio_ctl); + xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { - iowrite32be(regnum & 0xffff, &regs->mdio_addr); + xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Write the value to the register */ - iowrite32be(MDIO_DATA(value), &regs->mdio_data); + xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian); - ret = xgmac_wait_until_done(&bus->dev, regs); + ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; @@ -151,14 +181,16 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val */ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { - struct tgec_mdio_controller __iomem *regs = bus->priv; + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; uint16_t value; int ret; + bool endian = priv->is_little_endian; - mdio_stat = ioread32be(&regs->mdio_stat); + mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { dev_addr = (regnum >> 16) & 0x1f; mdio_stat |= MDIO_STAT_ENC; @@ -167,41 +199,41 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) mdio_stat &= ~MDIO_STAT_ENC; } - iowrite32be(mdio_stat, &regs->mdio_stat); + xgmac_write32(mdio_stat, &regs->mdio_stat, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the Port and Device Addrs */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - iowrite32be(mdio_ctl, &regs->mdio_ctl); + xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { - iowrite32be(regnum & 0xffff, &regs->mdio_addr); + xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Initiate the read */ - iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl); + xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian); - ret = xgmac_wait_until_done(&bus->dev, regs); + ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; /* Return all Fs if nothing was there */ - if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) { + if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); return 0xffff; } - value = ioread32be(&regs->mdio_data) & 0xffff; + value = xgmac_read32(&regs->mdio_data, endian) & 0xffff; dev_dbg(&bus->dev, "read %04x\n", value); return value; @@ -212,6 +244,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct resource res; + struct mdio_fsl_priv *priv; int ret; ret = of_address_to_resource(np, 0, &res); @@ -220,7 +253,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) return ret; } - bus = mdiobus_alloc(); + bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
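/* (Editorial note: mdiobus_alloc_size() is the stock mdio-bus helper that
 * allocates the mii_bus plus a trailing driver-private area in one block and
 * points bus->priv at the extra bytes, so the of_iomap() cookie can move into
 * struct mdio_fsl_priv without a separate allocation or a matching kfree.) */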
if (!bus) return -ENOMEM; @@ -231,12 +264,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); /* Set the PHY base address */ - bus->priv = of_iomap(np, 0); - if (!bus->priv) { + priv = bus->priv; + priv->mdio_base = of_iomap(np, 0); + if (!priv->mdio_base) { ret = -ENOMEM; goto err_ioremap; } + if (of_get_property(pdev->dev.of_node, + "little-endian", NULL)) + priv->is_little_endian = true; + else + priv->is_little_endian = false; + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); @@ -248,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) return 0; err_registration: - iounmap(bus->priv); + iounmap(priv->mdio_base); err_ioremap: mdiobus_free(bus); @@ -267,7 +307,7 @@ static int xgmac_mdio_remove(struct platform_device *pdev) return 0; } -static struct of_device_id xgmac_mdio_match[] = { +static const struct of_device_id xgmac_mdio_match[] = { { .compatible = "fsl,fman-xmdio", }, diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index b72d238695d7..3b39fdddeb57 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -413,6 +413,15 @@ out: return count; } +static void hip04_start_tx_timer(struct hip04_priv *priv) +{ + unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2; + + /* allow timer to fire after half the time at the earliest */ + hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns), + ns, HRTIMER_MODE_REL); +} + static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hip04_priv *priv = netdev_priv(ndev); @@ -466,8 +475,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) { /* cleanup not pending yet, start a new timer */ - hrtimer_start_expires(&priv->tx_coalesce_timer, - HRTIMER_MODE_REL); + hip04_start_tx_timer(priv); } return NETDEV_TX_OK; @@ -549,7 +557,7 @@ done: /* clean up tx descriptors and start a new timer if necessary */ tx_remaining = hip04_tx_reclaim(ndev, false); if (rx < budget && tx_remaining) - hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL); + hip04_start_tx_timer(priv); return rx; } @@ -809,7 +817,6 @@ static int hip04_mac_probe(struct platform_device *pdev) struct hip04_priv *priv; struct resource *res; unsigned int irq; - ktime_t txtime; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); @@ -846,9 +853,6 @@ static int hip04_mac_probe(struct platform_device *pdev) */ priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4; priv->tx_coalesce_usecs = 200; - /* allow timer to fire after half the time at the earliest */ - txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2); - hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime); priv->tx_coalesce_timer.function = tx_done; priv->map = syscon_node_to_regmap(arg.np); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index e8a1adb7a962..291c87036e17 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -103,7 +103,7 @@ static int ehea_probe_adapter(struct platform_device *dev); static int ehea_remove(struct platform_device *dev); -static struct of_device_id ehea_module_device_table[] = { +static const struct of_device_id ehea_module_device_table[] = { { .name = "lhea", .compatible = 
"IBM,lhea", @@ -116,7 +116,7 @@ static struct of_device_id ehea_module_device_table[] = { }; MODULE_DEVICE_TABLE(of, ehea_module_device_table); -static struct of_device_id ehea_device_table[] = { +static const struct of_device_id ehea_device_table[] = { { .name = "lhea", .compatible = "IBM,lhea", @@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) device_remove_file(&dev->dev, &dev_attr_remove_port); } +static int ehea_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) { + pr_info("Reboot: freeing all eHEA resources\n"); + ibmebus_unregister_driver(&ehea_driver); + } + return NOTIFY_DONE; +} + +static struct notifier_block ehea_reboot_nb = { + .notifier_call = ehea_reboot_notifier, +}; + +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret = NOTIFY_BAD; + struct memory_notify *arg = data; + + mutex_lock(&dlpar_mem_lock); + + switch (action) { + case MEM_CANCEL_OFFLINE: + pr_info("memory offlining canceled"); + /* Fall through: re-add canceled memory block */ + + case MEM_ONLINE: + pr_info("memory is going online"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + case MEM_GOING_OFFLINE: + pr_info("memory is going offline"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + default: + break; + } + + ehea_update_firmware_handles(); + ret = NOTIFY_OK; + +out_unlock: + mutex_unlock(&dlpar_mem_lock); + return ret; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + +static void ehea_crash_handler(void) +{ + int i; + + if (ehea_fw_handles.arr) + for (i = 0; i < ehea_fw_handles.num_entries; i++) + ehea_h_free_resource(ehea_fw_handles.arr[i].adh, + ehea_fw_handles.arr[i].fwh, + FORCE_FREE); + + if (ehea_bcmc_regs.arr) + for (i = 0; i < ehea_bcmc_regs.num_entries; i++) + ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, + ehea_bcmc_regs.arr[i].port_id, + ehea_bcmc_regs.arr[i].reg_type, + ehea_bcmc_regs.arr[i].macaddr, + 0, H_DEREG_BCMC); +} + +static atomic_t ehea_memory_hooks_registered; + +/* Register memory hooks on probe of first adapter */ +static int ehea_register_memory_hooks(void) +{ + int ret = 0; + + if (atomic_inc_and_test(&ehea_memory_hooks_registered)) + return 0; + + ret = ehea_create_busmap(); + if (ret) { + pr_info("ehea_create_busmap failed\n"); + goto out; + } + + ret = register_reboot_notifier(&ehea_reboot_nb); + if (ret) { + pr_info("register_reboot_notifier failed\n"); + goto out; + } + + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) { + pr_info("register_memory_notifier failed\n"); + goto out2; + } + + ret = crash_shutdown_register(ehea_crash_handler); + if (ret) { + pr_info("crash_shutdown_register failed\n"); + goto out3; + } + + return 0; + +out3: + unregister_memory_notifier(&ehea_mem_nb); +out2: + unregister_reboot_notifier(&ehea_reboot_nb); +out: + return ret; +} + +static void ehea_unregister_memory_hooks(void) +{ + if (atomic_read(&ehea_memory_hooks_registered)) + return; + + unregister_reboot_notifier(&ehea_reboot_nb); + if (crash_shutdown_unregister(ehea_crash_handler)) + pr_info("failed unregistering crash handler\n"); + unregister_memory_notifier(&ehea_mem_nb); +} + static int ehea_probe_adapter(struct platform_device *dev) { struct ehea_adapter 
*adapter; @@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev) int ret; int i; + ret = ehea_register_memory_hooks(); + if (ret) + return ret; + if (!dev || !dev->dev.of_node) { pr_err("Invalid ibmebus device probed\n"); return -EINVAL; @@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev) return 0; } -static void ehea_crash_handler(void) -{ - int i; - - if (ehea_fw_handles.arr) - for (i = 0; i < ehea_fw_handles.num_entries; i++) - ehea_h_free_resource(ehea_fw_handles.arr[i].adh, - ehea_fw_handles.arr[i].fwh, - FORCE_FREE); - - if (ehea_bcmc_regs.arr) - for (i = 0; i < ehea_bcmc_regs.num_entries; i++) - ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, - ehea_bcmc_regs.arr[i].port_id, - ehea_bcmc_regs.arr[i].reg_type, - ehea_bcmc_regs.arr[i].macaddr, - 0, H_DEREG_BCMC); -} - -static int ehea_mem_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - int ret = NOTIFY_BAD; - struct memory_notify *arg = data; - - mutex_lock(&dlpar_mem_lock); - - switch (action) { - case MEM_CANCEL_OFFLINE: - pr_info("memory offlining canceled"); - /* Readd canceled memory block */ - case MEM_ONLINE: - pr_info("memory is going online"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - case MEM_GOING_OFFLINE: - pr_info("memory is going offline"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - default: - break; - } - - ehea_update_firmware_handles(); - ret = NOTIFY_OK; - -out_unlock: - mutex_unlock(&dlpar_mem_lock); - return ret; -} - -static struct notifier_block ehea_mem_nb = { - .notifier_call = ehea_mem_notifier, -}; - -static int ehea_reboot_notifier(struct notifier_block *nb, - unsigned long action, void *unused) -{ - if (action == SYS_RESTART) { - pr_info("Reboot: freeing all eHEA resources\n"); - ibmebus_unregister_driver(&ehea_driver); - } - return NOTIFY_DONE; -} - -static struct notifier_block ehea_reboot_nb = { - .notifier_call = ehea_reboot_notifier, -}; - static int check_module_parm(void) { int ret = 0; @@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void) if (ret) goto out; - ret = ehea_create_busmap(); - if (ret) - goto out; - - ret = register_reboot_notifier(&ehea_reboot_nb); - if (ret) - pr_info("failed registering reboot notifier\n"); - - ret = register_memory_notifier(&ehea_mem_nb); - if (ret) - pr_info("failed registering memory remove notifier\n"); - - ret = crash_shutdown_register(ehea_crash_handler); - if (ret) - pr_info("failed registering crash handler\n"); - ret = ibmebus_register_driver(&ehea_driver); if (ret) { pr_err("failed registering eHEA device driver on ebus\n"); - goto out2; + goto out; } ret = driver_create_file(&ehea_driver.driver, @@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void) if (ret) { pr_err("failed to register capabilities attribute, ret=%d\n", ret); - goto out3; + goto out2; } return ret; -out3: - ibmebus_unregister_driver(&ehea_driver); out2: - unregister_memory_notifier(&ehea_mem_nb); - unregister_reboot_notifier(&ehea_reboot_nb); - crash_shutdown_unregister(ehea_crash_handler); + ibmebus_unregister_driver(&ehea_driver); out: return ret; } static void __exit ehea_module_exit(void) { - int ret; - driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); - unregister_reboot_notifier(&ehea_reboot_nb); - ret = 
crash_shutdown_unregister(ehea_crash_handler); - if (ret) - pr_info("failed unregistering crash handler\n"); - unregister_memory_notifier(&ehea_mem_nb); + ehea_unregister_memory_hooks(); kfree(ehea_fw_handles.arr); kfree(ehea_bcmc_regs.arr); ehea_destroy_busmap(); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 162762d1a12c..de7919322190 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -79,13 +79,6 @@ MODULE_AUTHOR ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>"); MODULE_LICENSE("GPL"); -/* - * PPC64 doesn't (yet) have a cacheable_memcpy - */ -#ifdef CONFIG_PPC64 -#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n)) -#endif - /* minimum number of free TX descriptors required to wake up TX process */ #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4) @@ -1673,7 +1666,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) dev_kfree_skb(dev->rx_sg_skb); dev->rx_sg_skb = NULL; } else { - cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), + memcpy(skb_tail_pointer(dev->rx_sg_skb), dev->rx_skb[slot]->data, len); skb_put(dev->rx_sg_skb, len); emac_recycle_rx_skb(dev, slot, len); @@ -1730,8 +1723,7 @@ static int emac_poll_rx(void *param, int budget) goto oom; skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); - cacheable_memcpy(copy_skb->data - 2, skb->data - 2, - len + 2); + memcpy(copy_skb->data - 2, skb->data - 2, len + 2); emac_recycle_rx_skb(dev, slot, len); skb = copy_skb; } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) @@ -2981,7 +2973,7 @@ static int emac_remove(struct platform_device *ofdev) } /* XXX Features in here should be replaced by properties... */ -static struct of_device_id emac_match[] = +static const struct of_device_id emac_match[] = { { .type = "network", diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index dddaab11a4c7..fdb5cdb3cd15 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -753,7 +753,7 @@ static int mal_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id mal_platform_match[] = +static const struct of_device_id mal_platform_match[] = { { .compatible = "ibm,mcmal", diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 457088fc5b06..206ccbbae7bb 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -305,7 +305,7 @@ static int rgmii_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id rgmii_match[] = +static const struct of_device_id rgmii_match[] = { { .compatible = "ibm,rgmii", diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index cb18e7f917c6..32cb6c9007c5 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -148,7 +148,7 @@ static int tah_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id tah_match[] = +static const struct of_device_id tah_match[] = { { .compatible = "ibm,tah", diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 36409ccb75ea..8727b865ea02 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -295,7 +295,7 @@ static int zmii_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id zmii_match[] = +static const struct of_device_id zmii_match[] = { { .compatible = "ibm,zmii", 
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 21978cc019e7..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1136,6 +1136,8 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { + napi_complete(napi); + /* We think we are done - reenable interrupts, * then check once more to make sure we are done. */ @@ -1144,8 +1146,6 @@ restart_poll: BUG_ON(lpar_rc != H_SUCCESS); - napi_complete(napi); - if (ibmveth_rxq_pending_buffer(adapter) && napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, @@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) return ret; } +static int ibmveth_set_mac_addr(struct net_device *dev, void *p) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + struct sockaddr *addr = p; + u64 mac_address; + int rc; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mac_address = ibmveth_encode_mac_addr(addr->sa_data); + rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); + if (rc) { + netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); + return rc; + } + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + static const struct net_device_ops ibmveth_netdev_ops = { .ndo_open = ibmveth_open, .ndo_stop = ibmveth_close, @@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_fix_features = ibmveth_fix_features, .ndo_set_features = ibmveth_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ibmveth_set_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmveth_poll_controller, #endif diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index e9c3a87e5b11..1a450f4b6b12 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -414,7 +414,7 @@ enum cb_status { /** * cb_command - Command Block flags - * @cb_tx_nc: 0: controler does CRC (normal), 1: CRC from skb memory + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory */ enum cb_command { cb_nop = 0x0000, @@ -899,7 +899,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, /* Order is important otherwise we'll be in a race with h/w: * set S-bit in current first, then clear S-bit in previous. 
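 * (Editorial aside on the barrier changes below: dma_wmb()/dma_rmb() order
 * the CPU's accesses to DMA-coherent memory only, which is all a descriptor
 * ring needs, whereas the full wmb()/rmb() also order MMIO accesses; on x86,
 * for instance, dma_rmb() reduces to a compiler barrier while rmb() emits an
 * lfence, so the swap drops a needless fence from the fast path.)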
*/ cb->command |= cpu_to_le16(cb_s); - wmb(); + dma_wmb(); cb->prev->command &= cpu_to_le16(~cb_s); while (nic->cb_to_send != nic->cb_to_use) { @@ -1843,7 +1843,7 @@ static int e100_tx_clean(struct nic *nic) for (cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete); cb = nic->cb_to_clean = cb->next) { - rmb(); /* read skb after status */ + dma_rmb(); /* read skb after status */ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, "cb[%d]->status = 0x%04X\n", (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), @@ -1993,7 +1993,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, "status=0x%04X\n", rfd_status); - rmb(); /* read size after status bit */ + dma_rmb(); /* read size after status bit */ /* If data isn't ready, nothing to indicate */ if (unlikely(!(rfd_status & cb_complete))) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 7f997d36948f..983eb4e6f7aa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count); @@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; + netif_carrier_off(netdev); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); e1000_reset(adapter); e1000_clean_all_tx_rings(adapter); @@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (e1000_read_mac_addr(hw)) e_err(probe, "EEPROM Read Error\n"); } - /* don't block initalization here due to bad MAC address */ + /* don't block initialization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) @@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) msleep(1); /* e1000_down has a dependency on max_frame_size */ hw->max_frame_size = max_frame; - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; e1000_down(adapter); + } /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next @@ -3848,7 +3856,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + dma_rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -4146,7 +4154,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read 
descriptor and rx_buffer_info after status DD */ status = rx_desc->status; @@ -4367,7 +4375,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; length = le16_to_cpu(rx_desc->length); diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index bb7ab3c321d6..0570c668ec3d 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -141,6 +141,7 @@ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9416e5a7e0c8..5d9ceb17b4cb 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -132,6 +132,7 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, + board_pch_spt }; struct e1000_ps_page { @@ -342,6 +343,7 @@ struct e1000_adapter { struct timecounter tc; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; u16 eee_advert; }; @@ -501,6 +503,7 @@ extern const struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 865ce45f9ec3..11f486e4ff7b 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: mask |= (1 << 18); break; default: break; } - if (mac->type == e1000_pch_lpt) + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 72f5475c4b90..19e8c487db06 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -87,6 +87,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_I218_V2 0x15A1 #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define 
E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_REVISION_4 4 @@ -108,6 +112,7 @@ enum e1000_mac_type { e1000_pchlan, e1000_pch2lan, e1000_pch_lpt, + e1000_pch_spt, }; enum e1000_media_type { @@ -153,6 +158,7 @@ enum e1000_bus_width { e1000_bus_width_pcie_x1, e1000_bus_width_pcie_x2, e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32, e1000_bus_width_64, e1000_bus_width_reserved diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 48b74a549155..9d81c0317433 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); @@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; @@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; - - /* Can't read flash registers if the register set isn't mapped. */ - if (!hw->flash_address) { - e_dbg("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } + u32 nvm_size; nvm->type = e1000_nvm_flash_sw; - gfpreg = er32flash(ICH_FLASH_GFPREG); + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. 
*/ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } - /* sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + gfpreg = er32flash(ICH_FLASH_GFPREG); - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - /* find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. - */ - nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT); - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; @@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: + case e1000_pch_spt: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -919,8 +950,9 @@ release: /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; - if (!link || ((status & E1000_STATUS_SPEED_100) && - (status & E1000_STATUS_FD))) + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg); @@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Si workaround for ULP entry flow on i217/rev6 h/w.
Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + if (ret_val) + goto release; + } + /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) @@ -1302,7 +1349,8 @@ out: static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; @@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * the IPG and reduce Rx latency in the PHY. */ if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt)) && link) { + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { u32 reg; reg = er32(STATUS); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { - u16 emi_addr; + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { - reg = er32(TIPG); - reg &= ~E1000_TIPG_IPGT_MASK; - reg |= 0xFF; - ew32(TIPG, reg); + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } - /* Reduce Rx latency in analog PHY */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ew32(TIPG, tipg_reg); - if (hw->mac.type == e1000_pch2lan) - emi_addr = I82579_RX_CONFIG; - else - emi_addr = I217_RX_CONFIG; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - hw->phy.ops.release(hw); + hw->phy.ops.release(hw); - if (ret_val) - return ret_val; - } + if (ret_val) + return ret_val; } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + if (!link) return 0; /* No link detected */ @@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: rc = e1000_init_phy_params_pchlan(hw); break; default: 
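As a quick sanity check of the SPT NVM sizing in e1000_init_nvm_params_ich8lan() above, the arithmetic can be reproduced as standalone userspace C; the STRAP value below is hypothetical, chosen only to exercise the formula (NVMS field in bits 5:1, NVM_SIZE_MULTIPLIER of 4096):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t strap = 0x0E;	/* hypothetical register value: NVMS = ((0x0E >> 1) & 0x1F) = 7 */
		uint32_t nvm_size = (((strap >> 1) & 0x1F) + 1) * 4096;	/* (7 + 1) * 4096 = 32768 bytes */
		uint32_t bank_words = (nvm_size / 2) / (uint32_t)sizeof(uint16_t);	/* two banks, sized in 16-bit words */

		printf("nvm_size=%u bytes, per-bank=%u words\n",
		       (unsigned)nvm_size, (unsigned)bank_words);
		return 0;	/* prints: nvm_size=32768 bytes, per-bank=8192 words */
	}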
@@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) s32 ret_val; switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); @@ -3008,6 +3092,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) } /** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. 
@@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or @@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { s32 i; @@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { e_dbg("Flash controller busy, cannot get access\n"); } @@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { @@ -3170,6 +3363,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) } /** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location @@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u16 word = 0; - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) return ret_val; @@ -3287,6 +3504,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
+ **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. @@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } /** - * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * e1000_update_nvm_checksum_spt - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, @@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, * After a successful commit, the shadow ram is cleared and is ready for * future writes. 
**/ -static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; - u16 data; + u32 dword = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) @@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; } - - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. 
*/ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { @@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
*/ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); @@ -3640,6 +4114,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dword to write. +* @data: The 4 bytes to write to the NVM. +* +* Writes a dword (4 bytes) to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to read. @@ -3656,6 +4214,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up.
+**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. @@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash @@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || - (device_id == E1000_DEV_ID_PCH_I218_V3)) { + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { .write = e1000_write_nvm_ich8lan, }; +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL @@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h 
b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8066a498eaac..770a573b9eea 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -95,9 +95,18 @@ #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1e8c40fd5c3d..c509a5c900f5 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, }; struct e1000_reg_info { @@ -946,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; @@ -1231,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + dma_rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1331,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, break; (*work_done)++; skb = buffer_info->skb; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ /* in the packet split case this is header only */ prefetch(skb->data - NET_IP_ALIGN); @@ -1535,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; @@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, 
adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } } /** @@ -3280,9 +3297,9 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RXDCTL(0), rxdctl | 0x3); } - pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); + pm_qos_update_request(&adapter->pm_qos_req, lat); } else { - pm_qos_update_request(&adapter->netdev->pm_qos_req, + pm_qos_update_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); } @@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) struct e1000_hw *hw = &adapter->hw; u32 incvalue, incperiod, shift; - /* Make sure clock is enabled on I217 before checking the frequency */ - if ((hw->mac.type == e1000_pch_lpt) && + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - /* On I217, the clock frequency is 25MHz or 96MHz as - * indicated by the System Clock Frequency Indication + case e1000_pch_spt: + /* On I217, I218 and I219, the clock frequency is 25MHz + * or 96MHz as indicated by the System Clock Frequency + * Indication */ - if ((hw->mac.type != e1000_pch_lpt) || + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; @@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter) break; case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= ETH_DATA_LEN) { @@ -4060,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) */ set_bit(__E1000_DOWN, &adapter->state); + netif_carrier_off(netdev); + /* disable receives in the hardware */ rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) @@ -4084,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); spin_unlock(&adapter->stats64_lock); @@ -4379,7 +4403,7 @@ static int e1000_open(struct net_device *netdev) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ - pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. 
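The PM QoS hunks in this file move the CPU DMA latency request out of struct net_device and into the adapter's private structure; the request lifecycle itself is unchanged. A minimal sketch of that add/update/remove lifecycle, assuming the pm_qos API of this kernel generation; the request object and the 55 usec bound are illustrative, not values taken from the patch:

	#include <linux/pm_qos.h>

	static struct pm_qos_request demo_qos_req;	/* illustrative; a real driver embeds this in its adapter state */

	static void demo_open(void)
	{
		/* register the request first, with no constraint yet */
		pm_qos_add_request(&demo_qos_req, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);
	}

	static void demo_rx_config(void)
	{
		/* bound CPU wakeup latency (in usec) while latency-sensitive Rx is active */
		pm_qos_update_request(&demo_qos_req, 55);
	}

	static void demo_close(void)
	{
		/* the request must be removed before its storage goes away */
		pm_qos_remove_request(&demo_qos_req);
	}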
@@ -4490,7 +4514,7 @@ static int e1000_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - pm_qos_remove_request(&adapter->netdev->pm_qos_req); + pm_qos_remove_request(&adapter->pm_qos_req); pm_runtime_put_sync(&pdev->dev); @@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. @@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; if ((adapter->flags & FLAG_HAS_FLASH) && - (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && + (hw->mac.type < e1000_pch_spt)) { flash_start = pci_resource_start(pdev, 1); flash_len = pci_resource_len(pdev, 1); adapter->hw.flash_address = ioremap(flash_start, flash_len); @@ -6847,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_hw_init; if ((adapter->flags & FLAG_IS_ICH) && - (adapter->flags & FLAG_READ_ONLY_NVM)) + (adapter->flags & FLAG_READ_ONLY_NVM) && + (hw->mac.type < e1000_pch_spt)) e1000e_write_protect_nvm_ich8lan(&adapter->hw); hw->mac.ops.get_bus_info(&adapter->hw); @@ -7043,7 +7071,7 @@ err_hw_init: kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); e1000e_reset_interrupt_capability(adapter); err_flashmap: @@ -7116,7 +7144,8 @@ static void e1000_remove(struct pci_dev *pdev) kfree(adapter->rx_ring); iounmap(adapter->hw.hw_addr); - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && + (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -7213,6 +7242,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 978ef9c4a043..8d7b21dc7e19 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -106,20 +106,18 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. 
**/ -static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u32 remainder; u64 ns; spin_lock_irqsave(&adapter->systim_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->systim_lock, flags); - ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -133,14 +131,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * wall timer value. **/ static int e1000e_phc_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; u64 ns; - ns = timespec_to_ns(ts); + ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->systim_lock, flags); @@ -171,11 +169,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; - struct timespec ts; + struct timespec64 ts; - adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts); + adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); - e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + e_dbg("SYSTIM overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&adapter->systim_overflow_work, E1000_SYSTIM_OVERFLOW_PERIOD); @@ -190,8 +189,8 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = { .pps = 0, .adjfreq = e1000e_phc_adjfreq, .adjtime = e1000e_phc_adjtime, - .gettime = e1000e_phc_gettime, - .settime = e1000e_phc_settime, + .gettime64 = e1000e_phc_gettime, + .settime64 = e1000e_phc_settime, .enable = e1000e_phc_enable, }; @@ -221,7 +220,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - if ((hw->mac.type != e1000_pch_lpt) || + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index ea235bbe50d3..85eefc4832ba 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ @@ -67,6 +68,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ @@ -121,6 +123,7 @@ (0x054E4 + ((_i - 16) * 
8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 42eb4344a9dc..c8c8c5baefda 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -235,6 +235,9 @@ struct fm10k_vxlan_port { __be16 port; }; +/* one work queue for entire driver */ +extern struct workqueue_struct *fm10k_workqueue; + struct fm10k_intfc { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; @@ -266,7 +269,6 @@ struct fm10k_intfc { u64 tx_csum_errors; u64 alloc_failed; u64 rx_csum_errors; - u64 rx_errors; u64 tx_bytes_nic; u64 tx_packets_nic; @@ -439,6 +441,7 @@ extern char fm10k_driver_name[]; extern const char fm10k_driver_version[]; int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +__be16 fm10k_tx_encap_offload(struct sk_buff *skb); netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring); void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); @@ -457,6 +460,9 @@ void fm10k_down(struct fm10k_intfc *interface); void fm10k_update_stats(struct fm10k_intfc *interface); void fm10k_service_event_schedule(struct fm10k_intfc *interface); void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); +#ifdef CONFIG_NET_POLL_CONTROLLER +void fm10k_netpoll(struct net_device *netdev); +#endif /* Netdev */ struct net_device *fm10k_alloc_netdev(void); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index bf19dccd4288..6cfae6ac04ea 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, /* Retrieve RX Owner Data */ id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); - /* Process RX Ring*/ + /* Process RX Ring */ do { rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), &q->rx_drops); @@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, * Function invalidates the index values for the queues so any updates that * may have happened are ignored and the base for the queue stats is reset. **/ - void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) { u32 i; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 212a92dad222..5c7a4d7662d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -128,7 +128,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) * * Returns that we support only IEEE DCB for this interface **/ -static u8 fm10k_dcbnl_getdcbx(struct net_device *dev) +static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev) { return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; } @@ -140,7 +140,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device *dev) * * Returns error on attempt to enable anything but IEEE DCB for this interface **/ -static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode) +static u8 fm10k_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode) { return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 4327f86218b9..f45b4d71adb8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,14 +36,16 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) return (*pos < ring->count) ? pos : NULL; } -static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos) +static void *fm10k_dbg_desc_seq_next(struct seq_file *s, + void __always_unused *v, loff_t *pos) { struct fm10k_ring *ring = s->private; return (++(*pos) < ring->count) ? pos : NULL; } -static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v) +static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, + __always_unused void *v) { /* Do nothing. */ } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 651f53bc7376..4b9d9f88af70 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
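A note on the fm10k_ethtool.c hunks that follow: splitting the flat stat table into global and PF-only halves and sizing the queue block from hw->mac.max_queues only works because the ETH_SS_STATS count, the string emission, and the value emission all walk the same conditional layout. A minimal sketch of that invariant (illustrative only, not part of the patch; the EXAMPLE_* lengths are hypothetical stand-ins for the driver's real arrays):

    #include <linux/types.h>

    #define EXAMPLE_NETDEV_STATS_LEN	8	/* hypothetical */
    #define EXAMPLE_GLOBAL_STATS_LEN	18	/* hypothetical */
    #define EXAMPLE_PF_STATS_LEN	8	/* hypothetical */

    /* Sketch only: whatever this count returns, get_strings() and
     * get_ethtool_stats() must emit in exactly the same order and quantity.
     */
    static int example_sset_count(bool is_vf, u16 max_queues)
    {
    	int len = EXAMPLE_NETDEV_STATS_LEN + EXAMPLE_GLOBAL_STATS_LEN;

    	if (!is_vf)
    		len += EXAMPLE_PF_STATS_LEN;	/* PF-only counters */

    	/* per queue: Tx and Rx rings, two u64 fields (packets, bytes) each */
    	len += max_queues * 2 * 2;

    	return len;
    }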
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -57,13 +57,12 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = { .stat_offset = offsetof(struct fm10k_intfc, _stat) \ } -static const struct fm10k_stats fm10k_gstrings_stats[] = { +static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("tx_restart_queue", restart_queue), FM10K_STAT("tx_busy", tx_busy), FM10K_STAT("tx_csum_errors", tx_csum_errors), FM10K_STAT("rx_alloc_failed", alloc_failed), FM10K_STAT("rx_csum_errors", rx_csum_errors), - FM10K_STAT("rx_errors", rx_errors), FM10K_STAT("tx_packets_nic", tx_packets_nic), FM10K_STAT("tx_bytes_nic", tx_bytes_nic), @@ -73,38 +72,42 @@ static const struct fm10k_stats fm10k_gstrings_stats[] = { FM10K_STAT("rx_overrun_pf", rx_overrun_pf), FM10K_STAT("rx_overrun_vf", rx_overrun_vf), - FM10K_STAT("timeout", stats.timeout.count), - FM10K_STAT("ur", stats.ur.count), - FM10K_STAT("ca", stats.ca.count), - FM10K_STAT("um", stats.um.count), - FM10K_STAT("xec", stats.xec.count), - FM10K_STAT("vlan_drop", stats.vlan_drop.count), - FM10K_STAT("loopback_drop", stats.loopback_drop.count), - FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), - FM10K_STAT("swapi_status", hw.swapi.status), FM10K_STAT("mac_rules_used", hw.swapi.mac.used), FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), - FM10K_STAT("mbx_tx_dropped", hw.mbx.tx_dropped), + FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped), FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), + FM10K_STAT("tx_hang_count", tx_timeout_count), + FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; -#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_stats) +static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { + FM10K_STAT("timeout", stats.timeout.count), + FM10K_STAT("ur", stats.ur.count), + FM10K_STAT("ca", stats.ca.count), + FM10K_STAT("um", stats.um.count), + FM10K_STAT("xec", stats.xec.count), + FM10K_STAT("vlan_drop", stats.vlan_drop.count), + FM10K_STAT("loopback_drop", stats.loopback_drop.count), + FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), +}; + +#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) +#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) -#define FM10K_QUEUE_STATS_LEN \ - (MAX_QUEUES * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) +#define FM10K_QUEUE_STATS_LEN(_n) \ + ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) -#define FM10K_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ - FM10K_NETDEV_STATS_LEN + \ - FM10K_QUEUE_STATS_LEN) +#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ + FM10K_NETDEV_STATS_LEN) static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { "Mailbox test (on/offline)" @@ -117,9 +120,9 @@ enum fm10k_self_test_types { FM10K_TEST_MAX = FM10K_TEST_LEN }; -static void fm10k_get_strings(struct net_device *dev, u32 stringset, - u8 *data) +static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct fm10k_intfc *interface = netdev_priv(dev); char *p = (char *)data; int i; @@ -135,12 +138,19 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, p += ETH_GSTRING_LEN; } for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, 
fm10k_gstrings_stats[i].stat_string, + memcpy(p, fm10k_gstrings_global_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < MAX_QUEUES; i++) { + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); @@ -156,18 +166,28 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, static int fm10k_get_sset_count(struct net_device *dev, int sset) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + int stats_len = FM10K_STATIC_STATS_LEN; + switch (sset) { case ETH_SS_TEST: return FM10K_TEST_LEN; case ETH_SS_STATS: - return FM10K_STATS_LEN; + stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues); + + if (hw->mac.type != fm10k_mac_vf) + stats_len += FM10K_PF_STATS_LEN; + + return stats_len; default: return -EOPNOTSUPP; } } static void fm10k_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats __always_unused *stats, + u64 *data) { const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); struct fm10k_intfc *interface = netdev_priv(netdev); @@ -184,12 +204,21 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, } for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - p = (char *)interface + fm10k_gstrings_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_stats[i].sizeof_stat == + p = (char *)interface + + fm10k_gstrings_global_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (i = 0; i < MAX_QUEUES; i++) { + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + p = (char *)interface + + fm10k_gstrings_pf_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; u64 *queue_stat; @@ -369,7 +398,7 @@ static void fm10k_get_drvinfo(struct net_device *dev, strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); - info->n_stats = FM10K_STATS_LEN; + info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); info->regdump_len = fm10k_get_regs_len(dev); } @@ -645,7 +674,7 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, } static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *rule_locs) + u32 __always_unused *rule_locs) { struct fm10k_intfc *interface = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -851,7 +880,7 @@ static void fm10k_self_test(struct net_device *dev, eth_test->flags |= ETH_TEST_FL_FAILED; } -static u32 fm10k_get_reta_size(struct net_device *netdev) +static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; } @@ -911,7 +940,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) return 0; } -static u32 fm10k_get_rssrk_size(struct net_device *netdev) +static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev) { return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } @@ -1019,7 +1048,7 @@ static int fm10k_set_channels(struct net_device *dev, } static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct ethtool_ts_info *info) { struct fm10k_intfc *interface = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 060190864238..5b08e6284a3c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -47,7 +47,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) { struct fm10k_hw *hw = &interface->hw; struct fm10k_iov_data *iov_data; - s64 mbicr, vflre; + s64 vflre; int i; /* if there is no iov_data then there is no mailboxes to process */ @@ -63,7 +63,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) goto read_unlock; if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR)) - goto process_mbx; + goto read_unlock; /* read VFLRE to determine if any VFs have been reset */ do { @@ -86,32 +86,6 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) } } while (i != iov_data->num_vfs); -process_mbx: - /* read MBICR to determine which VFs require attention */ - mbicr = fm10k_read_reg(hw, FM10K_MBICR(1)); - mbicr <<= 32; - mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0)); - - i = iov_data->next_vf_mbx ? 
: iov_data->num_vfs; - - for (mbicr <<= 64 - i; i--; mbicr += mbicr) { - struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx; - - if (mbicr >= 0) - continue; - - if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) - break; - - mbx->ops.process(hw, mbx); - } - - if (i >= 0) { - iov_data->next_vf_mbx = i + 1; - } else if (iov_data->next_vf_mbx) { - iov_data->next_vf_mbx = 0; - goto process_mbx; - } read_unlock: rcu_read_unlock(); @@ -139,6 +113,13 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) /* lock the mailbox for transmit and receive */ fm10k_mbx_lock(interface); + /* Most VF messages sent to the PF cause the PF to respond by + * requesting from the SM mailbox. This means that too many VF + * messages processed at once could cause a mailbox timeout on the PF. + * To prevent this, store a pointer to the next VF mbx to process. Use + * that as the start of the loop so that we don't starve whichever VF + * got ignored on the previous run. + */ process_mbx: for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) { struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; @@ -155,10 +136,6 @@ process_mbx: mbx->ops.connect(hw, mbx); } - /* no work pending, then just continue */ - if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx)) - continue; - /* guarantee we have free space in the SM mailbox */ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) break; @@ -167,6 +144,10 @@ process_mbx: mbx->ops.process(hw, mbx); } + /* if we stopped processing mailboxes early, update next_vf_mbx. + * Otherwise, reset next_vf_mbx, and restart loop so that we process + * the remaining mailboxes we skipped at the start. + */ if (i >= 0) { iov_data->next_vf_mbx = i + 1; } else if (iov_data->next_vf_mbx) { @@ -275,7 +256,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) if (vf_idx >= iov_data->num_vfs) return FM10K_ERR_PARAM; - /* determine if an update has occured and if so notify the VF */ + /* determine if an update has occurred and if so notify the VF */ vf_info = &iov_data->vf_info[vf_idx]; if (vf_info->sw_vid != pvid) { vf_info->sw_vid = pvid; @@ -488,8 +469,8 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, return 0; } -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused, - int rate) +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused unused, int rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 84ab9eea2768..1b0661e3573b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.12.2-k" +#define DRV_VERSION "0.15.2-k" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = @@ -41,6 +41,9 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +/* single workqueue for entire fm10k driver */ +struct workqueue_struct *fm10k_workqueue = NULL; + /** * fm10k_init_module - Driver Registration Routine * @@ -52,6 +55,10 @@ static int __init fm10k_init_module(void) pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); pr_info("%s\n", fm10k_copyright); + /* create driver workqueue */ + if (!fm10k_workqueue) + fm10k_workqueue = 
create_workqueue("fm10k"); + fm10k_dbg_init(); return fm10k_register_pci_driver(); @@ -69,6 +76,11 @@ static void __exit fm10k_exit_module(void) fm10k_unregister_pci_driver(); fm10k_dbg_exit(); + + /* destroy driver workqueue */ + flush_workqueue(fm10k_workqueue); + destroy_workqueue(fm10k_workqueue); + fm10k_workqueue = NULL; } module_exit(fm10k_exit_module); @@ -209,7 +221,7 @@ static inline bool fm10k_page_is_reserved(struct page *page) static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, struct page *page, - unsigned int truesize) + unsigned int __maybe_unused truesize) { /* avoid re-using remote pages */ if (unlikely(fm10k_page_is_reserved(page))) @@ -240,7 +252,6 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into @@ -253,8 +264,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, * The function will then update the page offset if necessary and return * true if the buffer can be reused by the interface. **/ -static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring, - struct fm10k_rx_buffer *rx_buffer, +static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { @@ -330,7 +340,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, DMA_FROM_DEVICE); /* pull page into skb */ - if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { /* hand second half of page back to the ring */ fm10k_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -412,7 +422,7 @@ static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, } static void fm10k_type_trans(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, + union fm10k_rx_desc __maybe_unused *rx_desc, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; @@ -509,8 +519,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, /** * fm10k_pull_tail - fm10k specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being adjusted * * This function is an fm10k specific version of __pskb_pull_tail. The @@ -520,9 +528,7 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. 
*/ -static void fm10k_pull_tail(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, - struct sk_buff *skb) +static void fm10k_pull_tail(struct sk_buff *skb) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; @@ -576,7 +582,7 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, /* place header in linear portion of buffer */ if (skb_is_nonlinear(skb)) - fm10k_pull_tail(rx_ring, rx_desc, skb); + fm10k_pull_tail(skb); /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) @@ -711,10 +717,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) return NULL; - /* verify protocol is transparent Ethernet bridging */ - if (nvgre_hdr->proto != htons(ETH_P_TEB)) - return NULL; - /* report start of ethernet header */ if (nvgre_hdr->flags & NVGRE_TNI) return (struct ethhdr *)(nvgre_hdr + 1); @@ -722,15 +724,13 @@ return (struct ethhdr *)(&nvgre_hdr->tni); } -static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) +__be16 fm10k_tx_encap_offload(struct sk_buff *skb) { + u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; struct ethhdr *eth_hdr; - u8 l4_hdr = 0; -/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */ -#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164 - if (skb_inner_transport_header(skb) - skb_mac_header(skb) > - FM10K_MAX_ENCAP_TRANSPORT_OFFSET) + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) return 0; switch (vlan_get_protocol(skb)) { @@ -760,12 +760,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) switch (eth_hdr->h_proto) { case htons(ETH_P_IP): + inner_l4_hdr = inner_ip_hdr(skb)->protocol; + break; case htons(ETH_P_IPV6): + inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + switch (inner_l4_hdr) { + case IPPROTO_TCP: + inner_l4_hlen = inner_tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + inner_l4_hlen = 8; break; default: return 0; } + /* The hardware allows tunnel offloads only if the combined inner and + * outer header is 184 bytes or less + */ + if (skb_inner_transport_header(skb) + inner_l4_hlen - + skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) + return 0; + return eth_hdr->h_proto; } @@ -934,10 +955,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ smp_mb(); - /* We need to check again in a case another CPU has just - * made room available. */ + /* Check again in case another CPU has just made room available */ if (likely(fm10k_desc_unused(tx_ring) < size)) return -EBUSY; @@ -1182,7 +1203,6 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) { /* Do the reset outside of interrupt context */ if (!test_bit(__FM10K_DOWN, &interface->state)) { - netdev_err(interface->netdev, "Reset interface\n"); interface->tx_timeout_count++; interface->flags |= FM10K_FLAG_RESET_REQUESTED; fm10k_service_event_schedule(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 9f5457c9e627..1b2738380518 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
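For reference, the __fm10k_maybe_stop_tx() hunk above documents a standard lockless stop/wake handshake between the transmit and cleanup paths. A minimal sketch of the pattern (illustrative only; struct example_ring and example_desc_unused() are hypothetical stand-ins, not fm10k API):

    #include <linux/netdevice.h>

    struct example_ring {
    	struct net_device *netdev;
    	u16 queue_index;
    };

    u16 example_desc_unused(const struct example_ring *ring);	/* assumed helper */

    /* Sketch only: smp_mb() orders the queue-stop store against the re-read
     * of the free-descriptor count, so a wake issued by the Tx cleanup path
     * on another CPU cannot be missed.
     */
    static int example_maybe_stop_tx(struct example_ring *ring, u16 size)
    {
    	netif_stop_subqueue(ring->netdev, ring->queue_index);

    	/* the stop must be visible before the recheck below */
    	smp_mb();

    	if (likely(example_desc_unused(ring) < size))
    		return -EBUSY;	/* still full; stay stopped */

    	/* cleanup freed descriptors between the first check and the stop */
    	netif_start_subqueue(ring->netdev, ring->queue_index);
    	return 0;
    }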
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indicies into the fifo based on head + offset + * This function returns the indices into the fifo based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indicies into the fifo based on tail + offset + * This function returns the indices into the fifo based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -126,6 +126,18 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) } /** + * fm10k_fifo_drop_all - Drop all messages in FIFO + * @fifo: pointer to FIFO + * + * This function resets the head pointer to drop all messages in the FIFO, + * and ensure the FIFO is empty. + **/ +static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) +{ + fifo->head = fifo->tail; +} + +/** * fm10k_mbx_index_len - Convert a head/tail index into a length value * @mbx: pointer to mailbox * @head: head index @@ -315,7 +327,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) } while (total_len < len); /* message extends out of pushed section, but fits in FIFO */ - if ((len < total_len) && (msg_len <= mbx->rx.size)) + if ((len < total_len) && (msg_len <= mbx->max_size)) return 0; /* return length of invalid section */ @@ -326,8 +338,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem * @mbx: pointer to mailbox * - * This function will take a seciton of the Rx FIFO and copy it into the - mbx->tail--; + * This function will take a section of the Tx FIFO and copy it into the * mailbox memory. The offset in mbmem is based on the lower bits of the * tail and len determines the length to copy. **/ @@ -418,7 +429,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw, * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function will take a seciton of the mailbox memory and copy it + * This function will take a section of the mailbox memory and copy it * into the Rx FIFO. The offset is based on the lower bits of the * head and len determines the length to copy. **/ @@ -464,7 +475,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, * @tail: tail index of message * * This function will first validate the tail index and size for the - * incoming message. It then updates the acknowlegment number and + * incoming message. It then updates the acknowledgment number and * copies the data into the FIFO. It will return the number of messages * dequeued on success and a negative value on error. 
**/ @@ -761,7 +772,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, err = fm10k_fifo_enqueue(&mbx->tx, msg); } - /* if we failed trhead the error */ + /* if we failed treat the error */ if (err) { mbx->timeout = 0; mbx->tx_busy++; @@ -815,10 +826,10 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) { u32 mbmem = mbx->mbmem_reg; - /* write new msg header to notify recepient of change */ + /* write new msg header to notify recipient of change */ fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); - /* write mailbox to sent interrupt */ + /* write mailbox to send interrupt */ if (mbx->mbx_lock) fm10k_write_reg(hw, mbx->mbx_reg, mbx->mbx_lock); @@ -1052,8 +1063,11 @@ static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) * @mbx: pointer to mailbox * @size: new value for max_size * - * This function will update the max_size value and drop any outgoing messages - * from the head of the Tx FIFO that are larger than max_size. + * This function updates the max_size value and drops any outgoing messages + * at the head of the Tx FIFO if they are larger than max_size. It does not + * drop all messages, as this is too difficult to parse and remove them from + * the FIFO. Instead, rely on the checking to ensure that messages larger + * than max_size aren't pushed into the memory buffer. **/ static void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) { @@ -1251,7 +1265,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, /* we will need to pull all of the fields for verification */ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); - /* we only have lower 10 bits of error number os add upper bits */ + /* we only have lower 10 bits of error number so add upper bits */ err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); @@ -1371,9 +1385,11 @@ static void fm10k_mbx_disconnect(struct fm10k_hw *hw, timeout -= FM10K_MBX_POLL_DELAY; } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); - /* in case we didn't close just force the mailbox into shutdown */ + /* in case we didn't close, just force the mailbox into shutdown and + * drop all left over messages in the FIFO. + */ fm10k_mbx_connect_reset(mbx); - fm10k_mbx_update_max_size(mbx, 0); + fm10k_fifo_drop_all(&mbx->tx); fm10k_write_reg(hw, mbx->mbmem_reg, 0); } @@ -1548,7 +1564,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->timeout = 0; mbx->udelay = FM10K_MBX_INIT_DELAY; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; @@ -1627,7 +1643,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) mbx->local = FM10K_SM_MBX_VERSION; mbx->remote = 0; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index cfde8bac1aeb..2f4f41b7eae7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) * fm10k_request_glort_range - Request GLORTs for use in configuring rules * @interface: board private structure * - * This function allocates a range of glorts for this inteface to use. + * This function allocates a range of glorts for this interface to use. **/ static void fm10k_request_glort_range(struct fm10k_intfc *interface) { @@ -770,18 +770,18 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (hw->mac.vlan_override) return -EACCES; - /* if default VLAN is already present do nothing */ - if (vid == hw->mac.default_vid) - return -EBUSY; - /* update active_vlans bitmask */ set_bit(vid, interface->active_vlans); if (!set) clear_bit(vid, interface->active_vlans); + /* if default VLAN is already present do nothing */ + if (vid == hw->mac.default_vid) + return 0; + fm10k_mbx_lock(interface); - /* only need to update the VLAN if not in promiscous mode */ + /* only need to update the VLAN if not in promiscuous mode */ if (!(netdev->flags & IFF_PROMISC)) { err = hw->mac.ops.update_vlan(hw, vid, 0, set); if (err) @@ -970,14 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev) fm10k_mbx_lock(interface); - /* syncronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); - } - - /* if we aren't changing modes there is nothing to do */ + /* update xcast mode first, but only if it changed */ if (interface->xcast_mode != xcast_mode) { /* update VLAN table */ if (xcast_mode == FM10K_XCAST_MODE_PROMISC) @@ -992,6 +985,13 @@ interface->xcast_mode = xcast_mode; } + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); + } + fm10k_mbx_unlock(interface); } @@ -1051,16 +1051,16 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* syncronize all of the addresses */ + /* update xcast mode before synchronizing addresses */ + hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); + + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); } - /* update xcast mode */ - hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); - fm10k_mbx_unlock(interface); /* record updated xcast mode state */ @@ -1126,7 +1126,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, } for (i = 0; i < interface->num_tx_queues; i++) { - ring = ACCESS_ONCE(interface->rx_ring[i]); + ring = ACCESS_ONCE(interface->tx_ring[i]); if (!ring) continue; @@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) } } +static netdev_features_t fm10k_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) + return features; + + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); +} + static const 
struct net_device_ops fm10k_netdev_ops = { .ndo_open = fm10k_open, .ndo_stop = fm10k_close, @@ -1372,6 +1382,10 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fm10k_netpoll, +#endif + .ndo_features_check = fm10k_features_check, }; #define DEFAULT_DEBUG_LEVEL_SHIFT 3 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4f5892cc32d7..df9fda38bdd1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -94,7 +94,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface) { if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) && !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state)) - schedule_work(&interface->service_task); + queue_work(fm10k_workqueue, &interface->service_task); } static void fm10k_service_event_complete(struct fm10k_intfc *interface) @@ -191,7 +191,6 @@ static void fm10k_reset_subtask(struct fm10k_intfc *interface) interface->flags &= ~FM10K_FLAG_RESET_REQUESTED; netdev_err(interface->netdev, "Reset interface\n"); - interface->tx_timeout_count++; fm10k_reinit(interface); } @@ -357,11 +356,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->rx_packets = pkts; interface->alloc_failed = alloc_failed; interface->rx_csum_errors = rx_csum_errors; - interface->rx_errors = rx_errors; hw->mac.ops.update_hw_stats(hw, &interface->stats); - for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) { + for (i = 0; i < hw->mac.max_queues; i++) { struct fm10k_hw_stats_q *q = &interface->stats.q[i]; tx_bytes_nic += q->tx_bytes.count; @@ -378,7 +376,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) interface->rx_drops_nic = rx_drops_nic; /* Fill out the OS statistics structure */ - net_stats->rx_errors = interface->stats.xec.count; + net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; } @@ -648,7 +646,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Configure the Rx buffer size for one buff without split */ srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; - /* Configure the Rx ring to supress loopback packets */ + /* Configure the Rx ring to suppress loopback packets */ srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); @@ -808,7 +806,7 @@ static void fm10k_napi_enable_all(struct fm10k_intfc *interface) } } -static irqreturn_t fm10k_msix_clean_rings(int irq, void *data) +static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) { struct fm10k_q_vector *q_vector = data; @@ -818,7 +816,7 @@ static irqreturn_t fm10k_msix_clean_rings(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data) +static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) { struct fm10k_intfc *interface = data; struct fm10k_hw *hw = &interface->hw; @@ -840,6 +838,28 @@ static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER 
+/** + * fm10k_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. + **/ +void fm10k_netpoll(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__FM10K_DOWN, &interface->state)) + return; + + for (i = 0; i < interface->num_q_vectors; i++) + fm10k_msix_clean_rings(0, interface->q_vector[i]); +} + +#endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_print_fault(struct fm10k_intfc *interface, int type, struct fm10k_fault *fault) @@ -964,7 +984,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) } } -static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) +static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) { struct fm10k_intfc *interface = data; struct fm10k_hw *hw = &interface->hw; @@ -986,6 +1006,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { mbx->ops.process(hw, mbx); + /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } @@ -1002,6 +1023,8 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) /* we should validate host state after interrupt event */ hw->mac.get_host_state = 1; + + /* validate host state, and handle VF mailboxes in the service task */ fm10k_service_event_schedule(interface); /* re-enable mailbox interrupt and indicate 20us delay */ @@ -1069,7 +1092,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; u64 timestamp; @@ -1089,7 +1112,7 @@ static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, /* generic error handler for mailbox issues */ static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; struct pci_dev *pdev; @@ -1165,7 +1188,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; u16 glort, pvid; @@ -1206,7 +1229,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_1588_timestamp timestamp; struct fm10k_iov_data *iov_data; @@ -1488,7 +1511,7 @@ void fm10k_up(struct fm10k_intfc *interface) /* enable transmits */ netif_tx_start_all_queues(interface->netdev); - /* kick off the service timer */ + /* kick off the service timer now */ hw->mac.get_host_state = 1; mod_timer(&interface->service_timer, jiffies); } @@ -1528,8 +1551,6 @@ void fm10k_down(struct fm10k_intfc *interface) /* disable polling routines */ fm10k_napi_disable_all(interface); - del_timer_sync(&interface->service_timer); - /* capture stats one last time before stopping interface */ fm10k_update_stats(interface); @@ -1655,6 +1676,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, (unsigned long)interface); 
INIT_WORK(&interface->service_task, fm10k_service_task); + /* kick off service timer now, even when interface is down */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + /* Intitialize timestamp data */ fm10k_ts_init(interface); @@ -1871,6 +1895,8 @@ static void fm10k_remove(struct pci_dev *pdev) struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; + del_timer_sync(&interface->service_timer); + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); cancel_work_sync(&interface->service_task); @@ -1984,7 +2010,8 @@ static int fm10k_resume(struct pci_dev *pdev) * a sleep state. The fm10k hardware does not support wake on lan so the * driver simply needs to shut down the device so it is in a low power state. **/ -static int fm10k_suspend(struct pci_dev *pdev, pm_message_t state) +static int fm10k_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7e4711958e46..891e21874b2a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) vid = (vid << 17) >> 17; /* verify the reserved 0 fields are 0 */ - if (len >= FM10K_VLAN_TABLE_VID_MAX || - vid >= FM10K_VLAN_TABLE_VID_MAX) + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* Loop through the table updating all required VLANs */ @@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_xc_addr_pf - Update device addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -330,6 +329,9 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, struct fm10k_mac_update mac_update; u32 msg[5]; + /* clear set bit from VLAN ID */ + vid &= ~FM10K_VLAN_CLEAR; + /* if glort or vlan are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -356,7 +358,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_uc_addr_pf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -454,7 +456,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) break; } - /* always reset VFITR2[0] to point to last enabled PF vector*/ + /* always reset VFITR2[0] to point to last enabled PF vector */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); /* reset ITR2[0] to point to last enabled PF vector */ @@ -677,7 +679,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, /* loop through unallocated rings assigning them back to PF */ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); - fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | vid); + fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | + FM10K_TXQCTL_UNLIMITED_BW | vid); fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); } @@ -812,7 +815,7 @@ static s32 
fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) if (vf_idx >= hw->iov.num_vfs) return FM10K_ERR_PARAM; - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -951,7 +954,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, if (vf_info->mbx.ops.disconnect) vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -1035,7 +1038,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, ((u32)vf_info->mac[2]); } - /* map queue pairs back to VF from last to first*/ + /* map queue pairs back to VF from last to first */ for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); @@ -1141,7 +1144,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, * * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1160,7 +1163,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, * * This function is a default handler for MAC/VLAN requests from the VF. * The assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1250,8 +1253,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, } /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, - !(vlan & FM10K_VLAN_CLEAR), 0); + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, + !(vlan & FM10K_VLAN_CLEAR)); } return err; @@ -1404,7 +1407,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, &stats->vlan_drop); loopback_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1573,7 +1576,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) s32 ret_val = 0; u32 dma_ctrl2; - /* verify the switch is ready for interraction */ + /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) goto out; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c index d966044e017a..9043633c3e50 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
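The fm10k_ptp.c hunks that follow make the same y2038-safe conversion as the e1000e PTP hunks earlier in this series: gettime/settime move to struct timespec64, and the open-coded nanosecond arithmetic becomes the time64.h helpers. A minimal sketch of the pattern (illustrative names, not driver API):

    #include <linux/time64.h>

    /* Sketch only: 64-bit nanosecond counter <-> timespec64, replacing
     * div_u64_rem() splits and tv_sec * NSEC_PER_SEC + tv_nsec math on
     * the 32-bit struct timespec.
     */
    static void example_phc_ns_to_ts(u64 ns, struct timespec64 *ts)
    {
    	*ts = ns_to_timespec64(ns);
    }

    static s64 example_phc_ts_to_ns(const struct timespec64 *ts)
    {
    	return timespec64_to_ns(ts);
    }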
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -285,7 +285,7 @@ static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; @@ -297,17 +297,17 @@ static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) now = fm10k_systime_read(interface) + interface->ptp_adjust; read_unlock_irqrestore(&interface->systime_lock, flags); - *ts = ns_to_timespec(now); + *ts = ns_to_timespec64(now); return 0; } static int fm10k_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; - u64 ns = timespec_to_ns(ts); + u64 ns = timespec64_to_ns(ts); interface = container_of(ptp, struct fm10k_intfc, ptp_caps); @@ -319,7 +319,8 @@ static int fm10k_ptp_settime(struct ptp_clock_info *ptp, } static int fm10k_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, + int __always_unused on) { struct ptp_clock_time *t = &rq->perout.period; struct fm10k_intfc *interface; @@ -419,8 +420,8 @@ void fm10k_ptp_register(struct fm10k_intfc *interface) ptp_caps->max_adj = 976562; ptp_caps->adjfreq = fm10k_ptp_adjfreq; ptp_caps->adjtime = fm10k_ptp_adjtime; - ptp_caps->gettime = fm10k_ptp_gettime; - ptp_caps->settime = fm10k_ptp_settime; + ptp_caps->gettime64 = fm10k_ptp_gettime; + ptp_caps->settime64 = fm10k_ptp_settime; /* provide pins if BAR4 is accessible */ if (interface->sw_addr) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index fd0a05f011a8..9b29d7b0377a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) /** * fm10k_tlv_msg_test - Validate all results on test message receive * @hw: Pointer to hardware structure - * @results: Pointer array to attributes in the mesage + * @results: Pointer array to attributes in the message * @mbx: Pointer to mailbox information structure * * This function does a check to verify all attributes match what the test diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 7c6d9d5a8ae5..4af96686c584 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -356,6 +356,9 @@ struct fm10k_hw; #define FM10K_QUEUE_DISABLE_TIMEOUT 100 #define FM10K_RESET_TIMEOUT 150 +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + /* VF registers */ #define FM10K_VFCTRL 0x00000 #define FM10K_VFCTRL_RST 0x00000008 @@ -593,7 +596,7 @@ struct fm10k_vf_info { u16 sw_vid; /* Switch API assigned VLAN */ u16 pf_vid; /* PF assigned Default VLAN */ u8 mac[ETH_ALEN]; /* PF Default MAC address */ - u8 vsi; /* VSI idenfifier */ + u8 vsi; /* VSI identifier */ u8 vf_idx; /* which VF this is */ u8 vf_flags; /* flags indicating what modes * are supported for the port diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f0aa0f97b4a9..94f0f6a146d9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) if (err) return err; - /* If permenant address is set then we need to restore it */ + /* If permanent address is set then we need to restore it */ if (is_valid_ether_addr(perm_addr)) { bal = (((u32)perm_addr[3]) << 24) | (((u32)perm_addr[4]) << 16) | @@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) * fm10k_reset_hw_vf - VF hardware reset * @hw: pointer to hardware structure * - * This function should return the hardare to a state similar to the + * This function should return the hardware to a state similar to the * one it is in after just being initialized. **/ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) @@ -124,6 +124,10 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) /* record maximum queue count */ hw->mac.max_queues = i; + /* fetch default VLAN */ + hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & + FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + return 0; } @@ -252,7 +256,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) } /** - * fm10k_update_uc_addr_vf - Update device unicast address + * fm10k_update_uc_addr_vf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -282,7 +286,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -295,7 +299,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_mc_addr_vf - Update device multicast address + * fm10k_update_mc_addr_vf - Update device multicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -319,7 +323,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, if (!is_multicast_ether_addr(mac)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -515,7 +519,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) * @hw: pointer to the hardware structure * * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate + * value measured in nanoseconds. In order to guarantee the value is accurate * we check the 32 most significant bits both before and after reading the * 32 least significant bits to verify they didn't change as we were reading * the registers. diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index c40581999121..b4729ba57c9c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2014 Intel Corporation. 
+# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2b65cdcad6ba..33c35d3b7420 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,6 +36,7 @@ #include <linux/aer.h> #include <linux/netdevice.h> #include <linux/ioport.h> +#include <linux/iommu.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> @@ -49,6 +50,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -70,6 +72,7 @@ #define I40E_MAX_NUM_DESCRIPTORS 4096 #define I40E_MAX_REGISTER 0x800000 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 #define I40E_MIN_NUM_DESCRIPTORS 64 @@ -94,6 +97,9 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 @@ -140,6 +146,7 @@ enum i40e_state_t { __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -168,6 +175,9 @@ struct i40e_lump_tracking { #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 #define I40E_FDIR_BUFFER_FULL_MARGIN 10 #define I40E_FDIR_BUFFER_HEAD_ROOM 32 +#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) + +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -232,17 +242,17 @@ struct i40e_pf { bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ - u16 num_req_vfs; /* num vfs requested for this vf */ - u16 num_vf_qps; /* num queue pairs per vf */ + u16 num_req_vfs; /* num VFs requested for this PF */ + u16 num_vf_qps; /* num queue pairs per VF */ #ifdef I40E_FCOE - u16 num_fcoe_qps; /* num fcoe queues this pf has set up */ + u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ #endif /* I40E_FCOE */ - u16 num_lan_qps; /* num lan queues this pf has set up */ - u16 num_lan_msix; /* num queue vectors for the base pf vsi */ + u16 num_lan_qps; /* num lan queues this PF has set up */ + u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ u16 rss_size; /* num queues in the RSS array */ u16 rss_size_max; /* HW defined max RSS queues */ @@ -269,7 +279,7 
@@ struct i40e_pf { enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; - u16 msg_enable; + u32 msg_enable; char int_name[I40E_INT_NAME_STR_LEN]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ unsigned long service_timer_period; @@ -383,6 +393,9 @@ struct i40e_pf { bool ptp_tx; bool ptp_rx; u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; }; struct i40e_mac_filter { @@ -405,6 +418,7 @@ struct i40e_veb { u16 uplink_seid; u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; u16 bw_limit; u8 bw_max_quanta; @@ -461,6 +475,9 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; + u16 rss_table_size; + u16 rss_size; + u16 max_frame; u16 rx_hdr_len; u16 rx_buf_len; @@ -478,6 +495,7 @@ struct i40e_vsi { u16 base_queue; /* vsi's first queue in hw array */ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ @@ -504,6 +522,9 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) static char buf[32]; snprintf(buf, sizeof(buf), - "f%d.%d a%d.%d n%02x.%02x e%08x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack); + (hw->nvm.eetrack & 0xffffff)); return buf; } @@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) /** * i40e_get_fd_cnt_all - get the total FD filter space available - * @pf: pointer to the pf struct + * @pf: pointer to the PF struct **/ static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) { @@ -607,6 +628,7 @@ extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); @@ -618,9 +640,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); -int i40e_get_current_fd_count(struct i40e_pf *pf); -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); -int i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_current_fd_count(struct i40e_pf *pf); +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, @@ -680,6 +703,7 @@ 
int i40e_vlan_rx_add_vid(struct net_device *netdev, int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif +int i40e_open(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -690,7 +714,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); int i40e_setup_tc(struct net_device *netdev, u8 tc); void i40e_netpoll(struct net_device *netdev); @@ -712,6 +735,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); #ifdef CONFIG_I40E_DCB void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); void i40e_dcbnl_set_all(struct i40e_vsi *vsi); void i40e_dcbnl_setup(struct i40e_vsi *vsi); @@ -727,4 +751,8 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 77f6254a89ac..3e0d20037675 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) ret_code = i40e_aq_get_firmware_version(hw, &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, + &hw->aq.fw_build, &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); @@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_free_arq; /* get the NVM version info */ - i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index de17b6fbcc4e..28e519a50de4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 11a9ffebf8d8..0bae22da014d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
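
The adminq init hunk above now also fetches the firmware build number, and still assembles the 32-bit EETRACK id from two 16-bit NVM words. A minimal sketch of that word-combining step, with invented word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* two 16-bit words as i40e_read_nvm_word() would return them */
	uint16_t eetrack_lo = 0x1234;
	uint16_t eetrack_hi = 0x8000;

	/* high word lands in bits 31:16, low word in bits 15:0 */
	uint32_t eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;

	printf("eetrack = 0x%08x\n", eetrack);	/* 0x80001234 */
	return 0;
}
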
+ * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -85,46 +86,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -534,7 +542,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure @@ -685,7 +692,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure - * @queue: target pf queue index + * @queue: target PF queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable @@ -827,12 +834,15 @@ static enum 
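
The i40e_debug_aq() rework above drops the four-dword accumulator in favour of printing the buffer byte by byte: full 16-byte rows first, then one short row for whatever is left over. A userspace sketch of the same dump shape (the kernel hunk unrolls the 16 byte arguments; an inner loop is used here for brevity):

#include <stdio.h>
#include <stdint.h>

/* Print a buffer 16 bytes per line, offset first, with a short final
 * line for the remainder -- the same shape as the reworked dump.
 */
static void dump_buf(const uint8_t *buf, size_t len)
{
	size_t i, j;

	for (i = 0; i < len; i += 16) {
		size_t n = (len - i < 16) ? len - i : 16;

		printf("\t0x%04zX ", i);
		for (j = 0; j < n; j++)
			printf(" %02X", buf[i + j]);
		printf("\n");
	}
}

int main(void)
{
	uint8_t data[21];	/* 21 bytes: one full row plus a leftover row */
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = (uint8_t)(i * 7);
	dump_buf(data, sizeof(data));
	return 0;
}
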
i40e_media_type i40e_get_media_type(struct i40e_hw *hw) case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: @@ -849,7 +859,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) } #define I40E_PF_RESET_WAIT_COUNT_A0 200 -#define I40E_PF_RESET_WAIT_COUNT 110 +#define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -868,8 +878,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ - grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK - >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; for (cnt = 0; cnt < grst_del + 2; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) @@ -946,7 +957,7 @@ void i40e_clear_hw(struct i40e_hw *hw) u32 val; u32 eol = 0x7ff; - /* get number of interrupts, queues, and vfs */ + /* get number of interrupts, queues, and VFs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; @@ -1075,8 +1086,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define I40E_LED0 22 +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 /** * i40e_led_get - return current on/off mode @@ -1089,6 +1103,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) **/ u32 i40e_led_get(struct i40e_hw *hw) { + u32 current_mode = 0; u32 mode = 0; int i; @@ -1101,6 +1116,20 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; @@ -1120,6 +1149,7 @@ u32 i40e_led_get(struct i40e_hw *hw) **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { + u32 current_mode = 0; int i; if (mode & 0xfffffff0) @@ -1134,6 +1164,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & @@ -1297,14 +1341,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } 
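
The GRSTDEL hunk above is a pure precedence fix: in C, >> binds tighter than &, so the unparenthesized form masked the register with a shifted constant instead of shifting the masked field. A tiny demo; the mask and shift values are invented, not the real register layout:

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x000000F0u
#define FIELD_SHIFT 4

int main(void)
{
	uint32_t val = 0x50;	/* field value 5 in bits 7:4 */

	/* parses as val & (FIELD_MASK >> FIELD_SHIFT), i.e. val & 0xF */
	uint32_t wrong = val & FIELD_MASK >> FIELD_SHIFT;
	/* the fixed form: mask first, then shift the field down */
	uint32_t right = (val & FIELD_MASK) >> FIELD_SHIFT;

	printf("wrong=%u right=%u\n", (unsigned)wrong, (unsigned)right);
	return 0;
}
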
/* Update the link info */ - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -1440,6 +1484,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, else hw_link_info->lse_enable = false; + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + /* save link status information */ if (link) *link = *hw_link_info; @@ -1452,35 +1500,6 @@ aq_get_link_info_exit: } /** - * i40e_update_link_info - * @hw: pointer to the hw struct - * @enable_lse: enable/disable LinkStatusEvent reporting - * - * Returns the link status of the adapter - **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status; - - status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL); - if (status) - return status; - - status = i40e_aq_get_phy_capabilities(hw, false, false, - &abilities, NULL); - if (status) - return status; - - if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED) - hw->phy.link_info.an_enabled = true; - else - hw->phy.link_info.an_enabled = false; - - return status; -} - -/** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set @@ -1759,6 +1778,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version + * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL @@ -1767,6 +1787,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, **/ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { @@ -1780,13 +1801,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - if (fw_major_version != NULL) + if (fw_major_version) *fw_major_version = le16_to_cpu(resp->fw_major); - if (fw_minor_version != NULL) + if (fw_minor_version) *fw_minor_version = le16_to_cpu(resp->fw_minor); - if (api_major_version != NULL) + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) *api_major_version = le16_to_cpu(resp->api_major); - if (api_minor_version != NULL) + if (api_minor_version) *api_minor_version = le16_to_cpu(resp->api_minor); } @@ -1816,7 +1839,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; @@ -1996,7 +2019,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * 
sizeof(struct i40e_aqc_add_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); @@ -2038,7 +2061,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); @@ -2060,7 +2083,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure - * @vfid: vf id to send msg + * @vfid: VF id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer @@ -2105,7 +2128,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, * Read the register using the admin queue commands **/ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, - u32 reg_addr, u64 *reg_val, + u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2116,17 +2139,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, if (reg_val == NULL) return I40E_ERR_PARAM; - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_debug_read_reg); + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = cpu_to_le32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - *reg_val = ((u64)cmd_resp->value_high << 32) | - (u64)cmd_resp->value_low; - *reg_val = le64_to_cpu(*reg_val); + *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | + (u64)le32_to_cpu(cmd_resp->value_low); } return status; @@ -2376,6 +2397,7 @@ i40e_aq_erase_nvm_exit: #define I40E_DEV_FUNC_CAP_LED 0x61 #define I40E_DEV_FUNC_CAP_SDP 0x62 #define I40E_DEV_FUNC_CAP_MDIO 0x63 +#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 /** * i40e_parse_discover_capabilities @@ -2520,11 +2542,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; + case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + p->wr_csr_prot = (u64)number; + p->wr_csr_prot |= (u64)logical_id << 32; + break; default: break; } } + if (p->fcoe) + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); + /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. */ @@ -2846,7 +2875,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) + if (!status && filter_index) *filter_index = resp->index; return status; @@ -3376,6 +3405,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. 
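
Both the debug-register read above and the new WR_CSR_PROT capability parsing build a 64-bit value from two 32-bit halves, and the fix converts each half from little-endian before combining: byte-swapping the assembled 64-bit value, as the old code did, scrambles the halves on big-endian hosts. A userspace sketch, assuming a glibc-style <endian.h> where le32toh() stands in for the kernel's le32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	/* the two halves as they arrive, little-endian on the wire */
	uint32_t value_low  = htole32(0x11223344);
	uint32_t value_high = htole32(0x55667788);

	/* convert each half first, then combine -- the fixed order */
	uint64_t reg_val = ((uint64_t)le32toh(value_high) << 32) |
			   (uint64_t)le32toh(value_low);

	printf("reg_val = 0x%016llx\n", (unsigned long long)reg_val);
	return 0;
}
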
+ * + **/ +static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL @@ -3439,3 +3509,136 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) break; } } + +/** + * i40e_aq_debug_dump + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table id within cluster + * @start_index: index of line in the block to read + * @buff_size: dump buffer size + * @buff: dump buffer + * @ret_buff_size: actual buffer size returned + * @ret_next_table: next block to read + * @ret_next_index: next index to read + * + * Dump internal FW/HW data for debug purposes. + * + **/ +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_dump_internals *cmd = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + struct i40e_aqc_debug_dump_internals *resp = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_debug_dump_internals); + /* Indirect Command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + cmd->cluster_id = cluster_id; + cmd->table_id = table_id; + cmd->idx = cpu_to_le32(start_index); + + desc.datalen = cpu_to_le16(buff_size); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (ret_buff_size) + *ret_buff_size = le16_to_cpu(desc.datalen); + if (ret_next_table) + *ret_next_table = resp->table_id; + if (ret_next_index) + *ret_next_index = le32_to_cpu(resp->idx); + } + + return status; +} + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the 
bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 3ce43588592d..2547aa21b2ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -419,7 +419,7 @@ static void i40e_cee_to_dcb_v1_config( { u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); - u8 i, tc, err, sync, oper; + u8 i, tc, err; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; @@ -456,10 +456,8 @@ static void i40e_cee_to_dcb_v1_config( status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> I40E_AQC_CEE_APP_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; - sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; - oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; - /* Add APPs if Error is False and Oper/Sync is True */ - if (!err && sync && oper) { + /* Add APPs if Error is False */ + if (!err) { /* CEE operating configuration supports FCoE/iSCSI/FIP only */ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 183dcb63ce98..bd5079d5c1b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) u32 val; val = rd32(hw, I40E_PRTDCB_GENC); - *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> + *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> I40E_PRTDCB_GENC_PFCLDA_SHIFT); } @@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return; + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + dcbxcfg = &hw->local_dcbx_config; /* Set up all the App TLVs if DCBx is negotiated */ @@ -223,7 +227,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, /** * i40e_dcbnl_del_app - Delete APP on all VSIs - * @pf: the corresponding pf + * @pf: the corresponding PF * @app: APP to delete * * Delete given APP from all the VSIs for given PF @@ -268,23 +272,26 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, /** * i40e_dcbnl_flush_apps - Delete all removed APPs - * @pf: the corresponding pf + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data * @new_cfg: new DCBX configuration data * * Find and delete all APPs that are not present in the passed * DCB configuration **/ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { struct i40e_dcb_app_priority_table app; - struct i40e_dcbx_config *dcbxcfg; - struct i40e_hw *hw = &pf->hw; int i; - dcbxcfg = &hw->local_dcbx_config; - for (i = 0; i < dcbxcfg->numapps; i++) { - app = dcbxcfg->app[i]; + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; /* The APP is not available anymore delete it */ if (!i40e_dcbnl_find_app(new_cfg, &app)) i40e_dcbnl_del_app(pf, &app); @@ -306,9 +313,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return; - /* Do not setup DCB NL ops for MFP mode */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - dev->dcbnl_ops = &dcbnl_ops; + dev->dcbnl_ops = &dcbnl_ops; /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 61236f983971..34170eabca7d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid - * @pf - the pf structure to search for the vsi + * @pf - the PF structure to search for the vsi * @seid - seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) @@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) /** * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf - the pf structure to search for the veb + * @pf - the PF structure to search 
for the veb * @seid - seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) @@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, /** * i40e_dbg_prep_dump_buf - * @pf: the pf we're working with + * @pf: the PF we're working with * @buflen: the desired buffer length * * Return positive if success, 0 if failed @@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = { * setup, adding or removing filters, or other things. Many of * these will be useful for some forms of unit testing. **************************************************************/ -static char i40e_dbg_command_buf[256] = "hello world"; +static char i40e_dbg_command_buf[256] = ""; /** * i40e_dbg_command_read - read for command datum @@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", vsi->netdev_registered, vsi->current_netdev_flags, vsi->state, vsi->flags); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", @@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); if (vsi->back) - dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back); + dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", @@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, - "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, - veb->uplink_seid); + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) /** * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the pf that would be altered + * @pf: the PF that would be altered * @flag: flag that needs enabling or disabling * @enable: Enable/disable FD SD/ATR **/ @@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) pf->flags &= ~flag; pf->auto_disable_flags |= flag; } - dev_info(&pf->pdev->dev, "requesting a pf reset\n"); + dev_info(&pf->pdev->dev, "requesting a PF reset\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } @@ -989,8 +995,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) + if (bytes_not_copied < 0) { + kfree(cmd_buf); return bytes_not_copied; + } if (bytes_not_copied > 0) count -= bytes_not_copied; cmd_buf[count] = '\0'; @@ -1380,6 +1388,50 @@ static ssize_t i40e_dbg_command_write(struct file *filp, r_cfg->app[i].selector, r_cfg->app[i].protocolid); } + } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { + int cluster_id, table_id; + int index, ret; + u16 buff_len = 4096; + u32 next_index; + u8 next_table; + u8 *buff; + u16 rlen; + + cnt = sscanf(&cmd_buf[18], "%i %i %i", + &cluster_id, &table_id, &index); + if (cnt != 3) { + dev_info(&pf->pdev->dev, + "dump debug fwdata <cluster_id> <table_id> <index>\n"); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata params %x %x %x %x\n", + cluster_id, table_id, index, buff_len); + buff = kzalloc(buff_len, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, + index, buff_len, buff, &rlen, + &next_table, &next_index, + NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "debug dump fwdata AQ Failed %d 0x%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n", + rlen, next_table, next_index); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, rlen, true); + kfree(buff); + buff = NULL; } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n"); @@ -1485,11 +1537,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } - } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { - i40e_pf_reset_stats(pf); - dev_info(&pf->pdev->dev, "pf clear stats called\n"); + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } } else { - dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; @@ -1891,11 +1947,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc aq\n"); dev_info(&pf->pdev->dev, " dump reset stats\n"); + dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> 
<table_id> <index>\n"); dev_info(&pf->pdev->dev, " msg_enable [level]\n"); dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); - dev_info(&pf->pdev->dev, " clear_stats pf\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); @@ -1933,7 +1990,7 @@ static const struct file_operations i40e_dbg_command_fops = { * The netdev_ops entry in debugfs is for giving the driver commands * to be executed from the netdev operations. **************************************************************/ -static char i40e_dbg_netdev_ops_buf[256] = "hello world"; +static char i40e_dbg_netdev_ops_buf[256] = ""; /** * i40e_dbg_netdev_ops - read for netdev_ops datum @@ -2121,8 +2178,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { }; /** - * i40e_dbg_pf_init - setup the debugfs directory for the pf - * @pf: the pf that is starting up + * i40e_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { @@ -2158,8 +2215,8 @@ create_failed: } /** - * i40e_dbg_pf_exit - clear out the pf's debugfs entries - * @pf: the pf that is stopping + * i40e_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping **/ void i40e_dbg_pf_exit(struct i40e_pf *pf) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b8230dc205ec..4cbaaeb902c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
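
The copy_from_user error path above is a leak fix: once cmd_buf is allocated, every early return has to free it. The same pattern in a self-contained userspace analogue, with a stub standing in for copy_from_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for copy_from_user(): fails when the "user" pointer is bad */
static int copy_in(char *dst, const char *src, size_t n)
{
	if (!src)
		return -1;
	memcpy(dst, src, n);
	return 0;
}

static int command_write(const char *user_buf, size_t count)
{
	char *cmd_buf = malloc(count + 1);

	if (!cmd_buf)
		return -1;
	if (copy_in(cmd_buf, user_buf, count) < 0) {
		free(cmd_buf);		/* the path the hunk adds */
		return -1;
	}
	cmd_buf[count] = '\0';
	printf("cmd: %s\n", cmd_buf);
	free(cmd_buf);
	return 0;
}

int main(void)
{
	command_write("dump switch", 11);
	command_write(NULL, 4);		/* exercises the error path */
	return 0;
}
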
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), @@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct @@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) } /** - * i40e_get_settings - Get Link Speed and Duplex settings + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in * @netdev: network interface device structure - * @ecmd: ethtool command * - * Reports speed/duplex settings based on media_type **/ -static int i40e_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_pf *pf = np->vsi->back; - struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; - bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; u32 link_speed = hw_link_info->link_speed; - /* hardware is either in 40G mode or 10G mode - * NOTE: this section initializes supported and advertising - */ - if (!link_up) { - /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up - */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full; - break; - } - - /* skip phy_type use as it is zero when link is down */ - goto no_valid_phy_type; - } - + /* Initialize supported and advertised settings based on phy settings */ switch 
(hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: @@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_40000baseCR4_Full; break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; case I40E_PHY_TYPE_40GBASE_KR4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_40000baseKR4_Full; @@ -311,13 +269,17 @@ static int i40e_get_settings(struct net_device *netdev, ADVERTISED_40000baseKR4_Full; break; case I40E_PHY_TYPE_40GBASE_SR4: - case I40E_PHY_TYPE_XLPPI: - case I40E_PHY_TYPE_XLAUI: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: ecmd->supported = SUPPORTED_40000baseLR4_Full; break; + case I40E_PHY_TYPE_20GBASE_KR2: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_20000baseKR2_Full; + break; case I40E_PHY_TYPE_10GBASE_KX4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseKX4_Full; @@ -334,55 +296,56 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; break; - case I40E_PHY_TYPE_10GBASE_CR1_CU: - case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_10GBASE_AOC: ecmd->supported = SUPPORTED_10000baseT_Full; break; - case I40E_PHY_TYPE_1000BASE_KX: - case I40E_PHY_TYPE_1000BASE_T: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - 
ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; default: /* if we got here and link is up something bad is afoot */ @@ -390,8 +353,125 @@ static int i40e_get_settings(struct net_device *netdev, hw_link_info->phy_type); } -no_valid_phy_type: - /* this is if autoneg is enabled or disabled */ + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + ethtool_cmd_speed_set(ecmd, SPEED_40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_DEV_ID_20G_KR2: + /* backplane 20G */ + ecmd->supported = SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_20000baseKR2_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if 
(hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -424,11 +504,13 @@ no_valid_phy_type: break; } + /* Set transceiver */ ecmd->transceiver = XCVR_EXTERNAL; + /* Set flow control settings */ ecmd->supported |= SUPPORTED_Pause; - switch (hw->fc.current_mode) { + switch (hw->fc.requested_mode) { case I40E_FC_FULL: ecmd->advertising |= ADVERTISED_Pause; break; @@ -445,30 +527,6 @@ no_valid_phy_type: break; } - if (link_up) { - switch (link_speed) { - case I40E_LINK_SPEED_40GB: - /* need a SPEED_40000 in ethtool.h */ - ethtool_cmd_speed_set(ecmd, 40000); - break; - case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - return 0; } @@ -588,6 +646,8 @@ static int i40e_set_settings(struct net_device *netdev, advertise & ADVERTISED_10000baseKX4_Full || advertise & ADVERTISED_10000baseKR_Full) config.link_speed |= I40E_LINK_SPEED_10GB; + if (advertise & ADVERTISED_20000baseKR2_Full) + config.link_speed |= I40E_LINK_SPEED_20GB; if (advertise & ADVERTISED_40000baseKR4_Full || advertise & ADVERTISED_40000baseCR4_Full || advertise & ADVERTISED_40000baseSR4_Full || @@ -601,6 +661,8 @@ static int i40e_set_settings(struct net_device *netdev, config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; /* set link and auto negotiation so changes take effect */ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* If link is up put link down */ @@ -621,7 +683,7 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) netdev_info(netdev, "Updating link info failed with error %d\n", status); @@ -767,7 +829,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n", + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", status, hw->aq.asq_last_status); err = -EAGAIN; } @@ -870,7 +932,9 @@ static int 
i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -974,7 +1038,10 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && + hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -998,6 +1065,7 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1176,7 +1244,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: - if (vsi == pf->vsi[pf->lan_vsi]) { + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { int len = I40E_PF_STATS_LEN(netdev); if (pf->lan_veb != I40E_NO_VEB) @@ -1185,6 +1253,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } else { return I40E_VSI_STATS_LEN(netdev); } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1247,7 +1317,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i += 2; } rcu_read_unlock(); - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1320,7 +1390,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); p += ETH_GSTRING_LEN; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1358,6 +1428,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; } } @@ -1473,6 +1552,7 @@ static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); struct i40e_pf *pf = np->vsi->back; if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -1480,6 +1560,12 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, &pf->state); + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); /* Link test performed before hardware reset * so autoneg doesn't interfere with test result @@ -1502,6 +1588,9 @@ static void i40e_diag_test(struct net_device *netdev, 
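
The ethtool self-test hunk here closes a running interface before the offline tests and reopens it afterwards (the dev_open() call lands just below). A skeleton of that flow with stub functions; the exact ordering around the PF reset is simplified relative to the driver:

#include <stdio.h>
#include <stdbool.h>

static void dev_close_stub(void) { printf("close netdev\n"); }
static void dev_open_stub(void)  { printf("reopen netdev\n"); }
static void pf_reset_stub(void)  { printf("PF reset\n"); }
static void run_offline_tests(void) { printf("link/reg/eeprom/intr tests\n"); }

static void diag_test_offline(bool if_running)
{
	if (if_running)
		dev_close_stub();	/* quiesce by taking the port down */
	else
		pf_reset_stub();	/* already down: reset instead */

	run_offline_tests();

	pf_reset_stub();		/* tests leave the HW needing a reset */
	if (if_running)
		dev_open_stub();	/* put the port back in service */
}

int main(void)
{
	diag_test_offline(true);
	return 0;
}
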
clear_bit(__I40E_TESTING, &pf->state); i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + if (if_running) + dev_open(netdev); } else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); @@ -1599,6 +1688,8 @@ static int i40e_set_phys_id(struct net_device *netdev, case ETHTOOL_ID_INACTIVE: i40e_led_set(hw, pf->led_status, false); break; + default: + break; } return 0; @@ -1703,6 +1794,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { cmd->data = 0; + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -1817,6 +1913,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, else fsp->ring_cookie = rule->q_index; + if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { + struct i40e_vsi *vsi; + + vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); + if (vsi && vsi->type == I40E_VSI_SRIOV) { + fsp->h_ext.data[1] = htonl(vsi->vf_id); + fsp->m_ext.data[1] = htonl(0x1); + } + } + return 0; } @@ -1974,6 +2080,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); + /* Save setting for future output/update */ + pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + return 0; } @@ -2107,6 +2216,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, struct i40e_fdir_filter *input; struct i40e_pf *pf; int ret = -EINVAL; + u16 vf_id; if (!vsi) return -EINVAL; @@ -2167,7 +2277,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + if (ntohl(fsp->m_ext.data[1])) { + if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { + netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); + goto free_input; + } + vf_id = ntohl(fsp->h_ext.data[1]); + /* Find vsi id from vf id and override dest vsi */ + input->dest_vsi = pf->vf[vf_id].lan_vsi_id; + if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { + netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); + goto free_input; + } + } + ret = i40e_add_del_fdir(vsi, input, true); +free_input: if (ret) kfree(input); else @@ -2281,10 +2406,6 @@ static int i40e_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ - /* cap RSS limit */ - if (count > pf->rss_size_max) - count = pf->rss_size_max; - /* use rss_reconfig to rebuild with new queue count and update traffic * class queue mapping */ @@ -2295,6 +2416,133 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +/** + * i40e_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_key_size(struct net_device *netdev) +{ + return I40E_HKEY_ARRAY_SIZE; +} + +/** + * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. 
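
The get_rxfh/set_rxfh handlers added just below pack the RSS indirection table four 8-bit queue indices per 32-bit register, low byte first. A self-contained round-trip sketch where a plain array stands in for the HLUT MMIO registers:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HLUT_REGS 4			/* the real table has 128 registers */

static uint32_t hlut[HLUT_REGS];	/* stands in for the HLUT MMIO */

static void set_indir(const uint32_t *indir)
{
	int i, j = 0;

	for (i = 0; i < HLUT_REGS; i++) {
		uint32_t reg = indir[j++];
		reg |= indir[j++] << 8;
		reg |= indir[j++] << 16;
		reg |= indir[j++] << 24;
		hlut[i] = reg;		/* wr32(hw, I40E_PFQF_HLUT(i), reg) */
	}
}

static void get_indir(uint32_t *indir)
{
	int i, j = 0;

	for (i = 0; i < HLUT_REGS; i++) {
		uint32_t reg = hlut[i];	/* rd32(hw, I40E_PFQF_HLUT(i)) */
		indir[j++] = reg & 0xff;
		indir[j++] = (reg >> 8) & 0xff;
		indir[j++] = (reg >> 16) & 0xff;
		indir[j++] = (reg >> 24) & 0xff;
	}
}

int main(void)
{
	uint32_t in[4 * HLUT_REGS], out[4 * HLUT_REGS];
	int i;

	for (i = 0; i < 4 * HLUT_REGS; i++)
		in[i] = i % 8;		/* spread entries over 8 queues */
	set_indir(in);
	get_indir(out);
	printf("round trip %s\n",
	       memcmp(in, out, sizeof(in)) ? "failed" : "ok");
	return 0;
}
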
+ **/ +static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) +{ + return I40E_HLUT_ARRAY_SIZE; +} + +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HLUT(i)); + indir[j++] = reg_val & 0xff; + indir[j++] = (reg_val >> 8) & 0xff; + indir[j++] = (reg_val >> 16) & 0xff; + indir[j++] = (reg_val >> 24) & 0xff; + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HKEY(i)); + key[j++] = (u8)(reg_val & 0xff); + key[j++] = (u8)((reg_val >> 8) & 0xff); + key[j++] = (u8)((reg_val >> 16) & 0xff); + key[j++] = (u8)((reg_val >> 24) & 0xff); + } + } + return 0; +} + +/** + * i40e_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. + **/ +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = indir[j++]; + reg_val |= indir[j++] << 8; + reg_val |= indir[j++] << 16; + reg_val |= indir[j++] << 24; + wr32(hw, I40E_PFQF_HLUT(i), reg_val); + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = key[j++]; + reg_val |= key[j++] << 8; + reg_val |= key[j++] << 16; + reg_val |= key[j++] << 24; + wr32(hw, I40E_PFQF_HKEY(i), reg_val); + } + } + return 0; +} + +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? 
+ I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2323,9 +2571,14 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_ethtool_stats = i40e_get_ethtool_stats, .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 27c206e62da7..1803afeef23e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,7 +24,6 @@ * ******************************************************************************/ - #include <linux/if_ether.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid) /** * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated - * @pf: pointer to pf + * @pf: pointer to PF * @ddp: sw DDP context * * Unmap the scatter-gather list associated with the given SW DDP context @@ -269,7 +268,7 @@ out: /** * i40e_fcoe_sw_init - sets up the HW for FCoE - * @pf: pointer to pf + * @pf: pointer to PF * * Returns 0 if FCoE is supported otherwise the error code **/ @@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) /** * i40e_get_fcoe_tc_map - Return TC map for FCoE APP - * @pf: pointer to pf + * @pf: pointer to PF * **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) @@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) ctxt->pf_num = hw->pf_id; ctxt->vf_num = 0; ctxt->uplink_seid = vsi->uplink_seid; - ctxt->connection_type = 0x1; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt->flags = I40E_AQ_VSI_TYPE_PF; /* FCoE VSI would need the following sections */ - info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID | - I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); /* FCoE VSI does not need these sections */ info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | @@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) I40E_AQ_VSI_PROP_INGRESS_UP_VALID | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); - info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } enabled_tc = i40e_get_fcoe_tc_map(pf); i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); @@ -1303,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring, /* MACLEN is ether header length in words not bytes */ td_offset |= (maclen >> 1) 
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT; - return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); } /** @@ -1443,7 +1445,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev, return 0; } - static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -1470,6 +1471,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_set_features = i40e_fcoe_set_features, }; +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + /** * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI * @vsi: pointer to the associated VSI struct @@ -1503,6 +1509,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); netdev->mtu = FCOE_MTU; SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); /* set different dev_port value 1 for FCoE netdev than the default * zero dev_port value for PF netdev, this helps biosdevname user * tool to differentiate them correctly while both attached to the @@ -1523,7 +1530,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) /** * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI - * @pf: the pf that VSI is associated with + * @pf: the PF that VSI is associated with * **/ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) @@ -1550,7 +1557,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); if (vsi) { dev_dbg(&pf->pdev->dev, - "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n", + "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", vsi->seid, vsi->id, vsi->uplink_seid, seid); } else { dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 21e0f582031c..0d49e2d15d40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -37,7 +37,6 @@ #define I40E_FILTER_CONTEXT_DESC(R, i) \ (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) - /* receive queue descriptor filter status for FCoE */ #define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 #define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 4627588f4613..0079ad7bcd0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits, if (ce_info->width < 32) mask = ((u32)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFF; + mask = ~(u32)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits, if (ce_info->width < 64) mask = ((u64)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFFFFFFFFFF; + mask = ~(u64)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbe281be1c9f..24481cd7e59a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,8 +38,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 6 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, /* required last entry */ {0, } }; @@ -249,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) } /** + * i40e_find_vsi_from_id - searches for the vsi with the given id + * @pf - the pf structure to search for the vsi + * @id - id of the vsi it is searching for + **/ +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->id == id)) + return pf->vsi[i]; + + return NULL; +} + +/** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * @@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) } /** - * i40e_pf_reset_stats - Reset all of the stats for the given pf + * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) @@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) } /** - * i40e_update_pf_stats - Update the pf statistics counters. + * i40e_update_pf_stats - Update the PF statistics counters. 
* @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) @@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); - i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), - pf->stat_offsets_loaded, - &osd->eth.tx_discards, - &nsd->eth.tx_discards); - i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, @@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL @@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns the first filter with the provided MAC address or NULL if @@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered - * @is_vf: true if it is a vf + * @is_vf: true if it is a VF * @is_netdev: true if it is a netdev * * Goes through all the macvlan filters and adds a @@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. @@ -1330,7 +1342,7 @@ add_filter_out: * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure it's a vf filter, else doesn't matter + * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter **/ void i40e_del_filter(struct i40e_vsi *vsi, @@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi, f->counter--; } } else { - /* make sure we don't remove a filter in use by vf or netdev */ + /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); @@ -1512,7 +1524,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs/numtc; + /* In MFP case we can have a much lower count of MSIx + * vectors available and so we need to lower the used + * q count. 
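
The queue-map hunk here caps the usable queue count at the number of LAN MSI-X vectors and then encodes each TC's queue count as a power-of-2 exponent (see the rounding loop that follows). A minimal standalone C sketch of that arithmetic, with invented values rather than the driver's own structures:

#include <stdio.h>

/* Cap queues at available vectors, split across TCs, then find the
 * next-higher power-of-2 exponent, as the queue-map code does. */
int main(void)
{
	int alloc_queue_pairs = 16, num_lan_msix = 6, numtc = 2;
	int qcount = alloc_queue_pairs < num_lan_msix ?
		     alloc_queue_pairs : num_lan_msix;
	int num_tc_qps = qcount / numtc;
	int pow = 0;

	while (num_tc_qps && ((1 << pow) < num_tc_qps))
		pow++;
	printf("qcount %d, per-TC %d, encoded as 2^%d\n",
	       qcount, num_tc_qps, pow);
	return 0;
}
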
+ */ + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); /* Setup queue offset/count for all TCs for given VSI */ @@ -1541,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; - /* find the power-of-2 of the number of queue pairs */ + /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && ((1 << pow) < qcount)) { @@ -1571,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -1962,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, @@ -1991,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) I40E_AQ_VSI_PVLAN_EMOD_NOTHING; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, @@ -2275,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (aq_ret) { dev_info(&vsi->back->pdev->dev, @@ -2393,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) struct i40e_vsi *vsi = ring->vsi; cpumask_var_t mask; - if (ring->q_vector && ring->netdev) { - /* Single TC mode enable XPS */ - if (vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + if (!ring->q_vector || !ring->netdev) + return; + + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1) { + if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, ring->queue_index); - } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - /* Disable XPS to allow selection based on TC */ - bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); - netif_set_xps_queue(ring->netdev, mask, - ring->queue_index); - free_cpumask_var(mask); - } + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); } } @@ -2591,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } return 0; } @@ -2684,8 +2712,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) u16 qoffset, qcount; int i, n; - if 
(!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) - return; + if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = 0; + tx_ring->dcb_tc = 0; + } + } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & (1 << n))) @@ -3171,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; dev_info(&pf->pdev->dev, "HMC error interrupt\n"); + dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", + rd32(hw, I40E_PFHMC_ERRORINFO), + rd32(hw, I40E_PFHMC_ERRORDATA)); } if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { @@ -3813,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; + kfree(pf->irq_pile); + pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } @@ -3830,6 +3870,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; + i40e_stop_misc_vector(pf); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) @@ -4003,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) #endif /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP - * @pf: pointer to pf + * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. @@ -4101,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else - enabled_tc = pf->hw.func_caps.enabled_tcmap; + return 1; /* Only TC0 */ /* At least have TC0 */ enabled_tc = (enabled_tc ? 
enabled_tc : 0x1); @@ -4151,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); - /* MPF enabled and iSCSI PF type */ + /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return pf->hw.func_caps.enabled_tcmap; + return i40e_pf_get_default_tc(pf); } /** @@ -4178,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi bw config, err %d, aq_err %d\n", + "couldn't get PF vsi bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4188,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", + "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4365,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); /* Update the VSI after updating the VSI queue-mapping information */ @@ -4545,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + /* Get the initial DCB configuration */ err = i40e_init_dcb(hw); if (!err) { @@ -4608,6 +4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) case I40E_LINK_SPEED_40GB: strlcpy(speed, "40 Gbps", SPEED_SIZE); break; + case I40E_LINK_SPEED_20GB: + strncpy(speed, "20 Gbps", SPEED_SIZE); + break; case I40E_LINK_SPEED_10GB: strlcpy(speed, "10 Gbps", SPEED_SIZE); break; @@ -4835,11 +4889,7 @@ exit: * * Returns 0 on success, negative value on failure **/ -#ifdef I40E_FCOE int i40e_open(struct net_device *netdev) -#else -static int i40e_open(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4949,7 +4999,7 @@ err_setup_tx: /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting - * @pf: Pointer to pf + * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. @@ -5037,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { - - /* Request a Firmware Reset - * - * Same as Global reset, plus restarting the - * embedded firmware engine. 
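
The i40e_do_reset() hunks in this area pass each reset request as a one-hot bit, (1 << __I40E_..._REQUESTED), and dispatch on it with a chain of bit tests. A small self-contained C illustration of the pattern; the enum values below are placeholders, not the driver's real bit positions:

#include <stdio.h>

enum { DEMO_GLOBR_REQUESTED = 1, DEMO_PF_RESET_REQUESTED = 2 };

/* Dispatch on single-bit reset requests, i40e_do_reset()-style. */
static void demo_do_reset(unsigned int reset_flags)
{
	if (reset_flags & (1 << DEMO_GLOBR_REQUESTED))
		printf("global reset requested\n");
	else if (reset_flags & (1 << DEMO_PF_RESET_REQUESTED))
		printf("PF reset requested\n");
}

int main(void)
{
	demo_do_reset(1 << DEMO_PF_RESET_REQUESTED);
	return 0;
}
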
- */ - /* enable EMP Reset */ - val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); - val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; - wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); - - /* force the reset */ - val = rd32(&pf->hw, I40E_GLGEN_RTRIG); - val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; - wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset @@ -5177,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; - struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; @@ -5208,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); /* Store the old configuration */ - tmp_dcbx_cfg = *dcbx_cfg; + tmp_dcbx_cfg = hw->local_dcbx_config; + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { @@ -5220,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, } /* No change detected in DCBX configs */ - if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); - i40e_dcbnl_flush_apps(pf, dcbx_cfg); + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ - if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; @@ -5254,8 +5288,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Wait for the PF's Tx queues to be disabled */ ret = i40e_pf_wait_txq_disabled(pf); - if (!ret) + if (ret) { + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } else { i40e_pf_unquiesce_all_vsi(pf); + } + exit: return ret; } @@ -5327,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf) * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { - int val, fcnt_prog; + u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); @@ -5337,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) } /** - * i40e_get_current_fd_count - Get the count of total FD filters programmed + * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ -int i40e_get_current_fd_count(struct i40e_pf *pf) +u32 i40e_get_current_fd_count(struct i40e_pf *pf) { - int val, fcnt_prog; + u32 val, fcnt_prog; + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> @@ -5351,6 +5392,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf) } /** + * 
i40e_get_global_fd_count - Get total FD filters programmed on device + * @pf: board private structure + **/ +u32 i40e_get_global_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); + fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> + I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); + return fcnt_prog; +} + +/** * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled * @pf: board private structure **/ @@ -5364,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) /* Check if FD SB or ATR was auto disabled and if there is enough room * to re-enable */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || @@ -5386,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) } #define I40E_MIN_FD_FLUSH_INTERVAL 10 +#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 /** * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB * @pf: board private structure **/ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) { + unsigned long min_flush_time; int flush_wait_retry = 50; + bool disable_atr = false; + int fd_room; int reg; if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) @@ -5400,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + /* If the flush is happening too quick and we have mostly + * SB rules we should not re-enable ATR for some time. + */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + pf->fd_flush_timestamp = jiffies; - pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, @@ -5422,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } @@ -5436,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed * @pf: board private structure **/ -int i40e_get_current_atr_cnt(struct i40e_pf *pf) +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } @@ -5462,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; - if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && - (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && - (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED,
&pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); @@ -5587,7 +5654,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) int i, v; /* If we're down or resetting, just bail */ - if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; /* for each VSI/netdev @@ -5732,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_hw *hw = &pf->hw; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; - struct i40e_link_status *hw_link_info = &hw->phy.link_info; /* save off old link status information */ - memcpy(&pf->hw.phy.link_info_old, hw_link_info, - sizeof(pf->hw.phy.link_info_old)); + hw->phy.link_info_old = hw->phy.link_info; /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct @@ -5850,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; + case i40e_aqc_opc_nvm_erase: + case i40e_aqc_opc_nvm_update: + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + break; default: dev_info(&pf->pdev->dev, "ARQ Error: Unknown event 0x%04x received\n", @@ -5894,6 +5964,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) } /** + * i40e_enable_pf_switch_lb + * @pf: pointer to the PF structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the PF structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN 
VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -5939,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - /* Enable LB mode for the main VSI now that it is on a VEB */ - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -6112,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * - * Close up the VFs and other things in prep for pf Reset. + * Close up the VFs and other things in prep for PF Reset. **/ static void i40e_prep_for_reset(struct i40e_pf *pf) { @@ -6197,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { - clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_verify_eeprom(pf); - } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); @@ -6310,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } - msleep(75); - ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (ret) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6339,7 +6495,7 @@ clear_recovery: } /** - * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild + * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, @@ -6353,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf) /** * i40e_handle_mdd_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ @@ -6382,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; @@ -6468,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; - u8 filter_index; __be16 port; int i; 
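
The vxlan sync hunk that follows is driven by a pending bitmap: writers set a bit for each changed port slot, and the service task walks the bitmap, clears each bit, and applies the add or delete. A standalone C sketch of that pattern, with invented sizes and structures:

#include <stdio.h>

#define DEMO_MAX_PORTS 16

static unsigned int pending_bitmap;
static unsigned short ports[DEMO_MAX_PORTS]; /* 0 means slot free */

/* Walk and clear pending bits, applying the queued change for each. */
static void sync_ports(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_PORTS; i++) {
		if (!(pending_bitmap & (1u << i)))
			continue;
		pending_bitmap &= ~(1u << i);
		printf("%s port %d at index %d\n",
		       ports[i] ? "add" : "delete", ports[i], i);
	}
}

int main(void)
{
	ports[3] = 4789;           /* queue an add for slot 3 */
	pending_bitmap |= 1u << 3;
	sync_ports();
	return 0;
}
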
@@ -6481,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) if (pf->pending_vxlan_bitmap & (1 << i)) { pf->pending_vxlan_bitmap &= ~(1 << i); port = pf->vxlan_ports[i]; - ret = port ? - i40e_aq_add_udp_tunnel(hw, ntohs(port), + if (port) + ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), I40E_AQC_TUNNEL_TYPE_VXLAN, - &filter_index, NULL) - : i40e_aq_del_udp_tunnel(hw, i, NULL); + NULL, NULL); + else + ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", - port ? "adding" : "deleting", - ntohs(port), port ? i : i); - + dev_info(&pf->pdev->dev, + "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + port ? "add" : "delete", + ntohs(port), i, ret, + pf->hw.aq.asq_last_status); pf->vxlan_ports[i] = 0; - } else { - dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n", - port ? "Added" : "Deleted", - ntohs(port), port ? i : filter_index); } } } @@ -6703,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; INIT_LIST_HEAD(&vsi->mac_filter_list); @@ -6783,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) goto unlock_vsi; } - /* updates the pf for this cleared vsi */ + /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); @@ -6896,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) * * Work with the OS to set up the MSIX vectors needed. * - * Returns 0 on success, negative on failure + * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { - i40e_status err = 0; struct i40e_hw *hw = &pf->hw; - int other_vecs = 0; + int vectors_left; int v_budget, i; - int vec; + int v_actual; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; @@ -6926,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf) * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ - pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); - pf->num_vmdq_msix = pf->num_vmdq_qps; - other_vecs = 1; - other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - other_vecs++; - - /* Scale down if necessary, and the rings will share vectors */ - pf->num_lan_msix = min_t(int, pf->num_lan_msix, - (hw->func_caps.num_msix_vectors - other_vecs)); - v_budget = pf->num_lan_msix + other_vecs; + vectors_left = hw->func_caps.num_msix_vectors; + v_budget = 0; + + /* reserve one vector for miscellaneous handler */ + if (vectors_left) { + v_budget++; + vectors_left--; + } + + /* reserve vectors for the main PF traffic queues */ + pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + vectors_left -= pf->num_lan_msix; + v_budget += pf->num_lan_msix; + + /* reserve one vector for sideband flow director */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (vectors_left) { + v_budget++; + vectors_left--; + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + } + } #ifdef I40E_FCOE + /* can we reserve enough for FCoE? 
*/ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_fcoe_msix = pf->num_fcoe_qps; + if (!vectors_left) + pf->num_fcoe_msix = 0; + else if (vectors_left >= pf->num_fcoe_qps) + pf->num_fcoe_msix = pf->num_fcoe_qps; + else + pf->num_fcoe_msix = 1; v_budget += pf->num_fcoe_msix; + vectors_left -= pf->num_fcoe_msix; } + #endif + /* any vectors left over go for VMDq support */ + if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); + + /* if we're short on vectors for what's desired, we limit + * the queues per vmdq. If this is still more than are + * available, the user will need to change the number of + * queues/vectors used by the PF later with the ethtool + * channels command + */ + if (vmdq_vecs < vmdq_vecs_wanted) + pf->num_vmdq_qps = 1; + pf->num_vmdq_msix = pf->num_vmdq_qps; + + v_budget += vmdq_vecs; + vectors_left -= vmdq_vecs; + } pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6952,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf) for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; - vec = i40e_reserve_msix_vectors(pf, v_budget); + v_actual = i40e_reserve_msix_vectors(pf, v_budget); - if (vec != v_budget) { + if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable @@ -6967,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_msix = 0; } - if (vec < I40E_MIN_MSIX) { + if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; return -ENODEV; - } else if (vec == I40E_MIN_MSIX) { + } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; - } else if (vec != v_budget) { + } else if (v_actual != v_budget) { + int vec; + /* reserve the misc vector */ - vec--; + vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; + pf->num_vmdq_qps = 1; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; /* partition out the remaining vectors */ switch (vec) { @@ -7012,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf) vec--; } #endif - pf->num_lan_msix = min_t(int, (vec / 2), - pf->num_lan_qps); - pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), - I40E_DEFAULT_NUM_VMDQ_VSI); + /* give the rest to the PF */ + pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); break; } } @@ -7032,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_FCOE_ENABLED; } #endif - return err; + return v_actual; } /** @@ -7107,13 +7301,14 @@ err_out: * i40e_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize **/ -static void i40e_init_interrupt_scheme(struct i40e_pf *pf) +static int i40e_init_interrupt_scheme(struct i40e_pf *pf) { - int err = 0; + int vectors = 0; + ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - err = i40e_init_msix(pf); - if (err) { + vectors = i40e_init_msix(pf); + if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | @@ -7133,18 +7328,32 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { 
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); - err = pci_enable_msi(pf->pdev); - if (err) { - dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); + vectors = pci_enable_msi(pf->pdev); + if (vectors < 0) { + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", + vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } + vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); - /* track first vector for misc interrupts */ - err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = vectors; + pf->irq_pile->search_hint = 0; + + /* track first vector for misc interrupts, ignore return */ + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); + + return 0; } /** @@ -7194,6 +7403,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss(struct i40e_pf *pf) { u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; @@ -7211,15 +7421,14 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + /* Check capability and Set table size and register per hw expectation*/ reg_val = rd32(hw, I40E_PFQF_CTL_0); - if (hw->func_caps.rss_table_size == 512) { + if (pf->rss_table_size == 512) reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; - pf->rss_table_size = 512; - } else { - pf->rss_table_size = 128; + else reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; - } wr32(hw, I40E_PFQF_CTL_0, reg_val); /* Populate the LUT with max no. of queues in round robin fashion */ @@ -7232,7 +7441,7 @@ static int i40e_config_rss(struct i40e_pf *pf) * If LAN VSI is the only consumer for RSS then this requirement * is not necessary. 
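
The LUT fill just below packs four 8-bit queue indices into each 32-bit HLUT register while cycling round robin over the RSS queue count. A runnable C approximation with the register writes mocked by printf and the sizes shrunk for readability; the real mask width comes from the hardware capabilities, so the 0xff here is illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int lut = 0;
	int rss_size = 4, rss_table_size = 16; /* 4 registers */
	int i, j;

	for (i = 0, j = 0; i < rss_table_size; i++, j++) {
		if (j == rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j & 0xff);
		if ((i & 3) == 3)
			printf("HLUT[%d] = 0x%08x\n", i >> 2, lut);
	}
	return 0;
}
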
*/ - if (j == pf->rss_size) + if (j == vsi->rss_size) j = 0; /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (j & @@ -7256,15 +7465,19 @@ static int i40e_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; - queue_count = min_t(int, queue_count, pf->rss_size_max); + new_rss_size = min_t(int, queue_count, pf->rss_size_max); - if (queue_count != pf->rss_size) { + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = queue_count; + pf->rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); i40e_config_rss(pf); @@ -7274,6 +7487,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) } /** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! 
This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. + */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * @@ -7299,8 +7634,12 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RX_1BUF_ENABLED; + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; @@ -7311,6 +7650,7 @@ static int i40e_sw_init(struct i40e_pf *pf) */ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { @@ -7322,6 +7662,13 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); } /* FW/NVM is not yet fixed in this regard */ @@ -7329,11 +7676,11 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per pf */ + /* Setup a counter for fd_atr per PF */ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); if (!(pf->flags & 
I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per pf */ + /* Setup a counter for fd_sb per PF */ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); } else { dev_info(&pf->pdev->dev, @@ -7381,22 +7728,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->qp_pile->search_hint = 0; - /* set up vector assignment tracking */ - size = sizeof(struct i40e_lump_tracking) - + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); - pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) { - kfree(pf->qp_pile); - err = -ENOMEM; - goto sw_init_done; - } - pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; - pf->irq_pile->search_hint = 0; - pf->tx_timeout_recovery_level = 1; mutex_init(&pf->switch_mutex); + /* If NPAR is enabled nudge the Tx scheduler */ + if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) + i40e_set_npar_bw_setting(pf); + sw_init_done: return err; } @@ -7509,7 +7848,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); + netdev_info(netdev, "vxlan port %d already offloaded\n", + ntohs(port)); return; } @@ -7517,7 +7857,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, next_idx = i40e_get_vxlan_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", + netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", ntohs(port)); return; } @@ -7525,8 +7865,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->vxlan_ports[next_idx] = port; pf->pending_vxlan_bitmap |= (1 << next_idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); } /** @@ -7554,12 +7895,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev, * and make it pending */ pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= (1 << idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", + ntohs(port)); } else { - netdev_warn(netdev, "Port %d was not found, not deleting\n", + netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } } @@ -7628,6 +7970,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. 
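
The setlink handler that follows is easier to read as a decision table: no hardware bridge yet means inserting one in the requested mode; an existing bridge in a different mode means storing the new mode and requesting a PF reset so the components are rebuilt; a matching mode is a no-op. Restated as plain C with illustrative names only:

#include <stdio.h>

enum { DEMO_MODE_VEB, DEMO_MODE_VEPA };

/* Mirror the three outcomes of the bridge setlink handler below. */
static const char *setlink_action(int have_bridge, int cur_mode,
				  int req_mode)
{
	if (!have_bridge)
		return "insert HW bridge in requested mode";
	if (req_mode != cur_mode)
		return "store new mode and request PF reset to rebuild";
	return "nothing to do";
}

int main(void)
{
	printf("%s\n", setlink_action(1, DEMO_MODE_VEB, DEMO_MODE_VEPA));
	return 0;
}
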
+ **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating, + * i.e. VEB or VEPA. + **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7662,6 +8116,10 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -7774,6 +8232,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi) } /** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb &&
(veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * @@ -7805,11 +8287,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi config, err %d, aq_err %d\n", + "couldn't get PF vsi config, err %d, aq_err %d\n", ret, pf->hw.aq.asq_last_status); return -ENOENT; } - memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; @@ -7858,12 +8340,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections |= + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id = + ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7871,16 +8355,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ - ctxt.info.switch_id = 0; - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); @@ -7890,15 +8376,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
*/ - ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; @@ -7936,7 +8425,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = -ENOENT; goto err; } - memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; @@ -8256,7 +8745,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) @@ -8699,7 +9188,7 @@ err_alloc: } /** - * i40e_setup_pf_switch_element - set pf vars based on switch type + * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements @@ -8905,15 +9394,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); - - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? true : false); - - /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -8983,7 +9464,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_DCB_CAPABLE; dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } - pf->num_lan_qps = pf->rss_size_max; + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + queues_left -= pf->num_lan_qps; } @@ -9036,7 +9521,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * - * i40e_setup_pf_filter_control sets up a pf's initial filter control + * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. @@ -9081,8 +9566,10 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) buf += sprintf(buf, "RSS "); @@ -9111,14 +9598,16 @@ static void i40e_print_features(struct i40e_pf *pf) * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * - * i40e_probe initializes a pf identified by a pci_dev structure. 
- * The OS initialization, configuring of the pf private structure, + * i40e_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. * * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct i40e_aq_get_phy_abilities_resp abilities; + unsigned long ioremap_len; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; @@ -9170,8 +9659,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &pf->hw; hw->back = pf; - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + + ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", @@ -9249,7 +9741,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); - i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ @@ -9342,7 +9833,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up the main switch operations */ i40e_determine_queue_usage(pf); - i40e_init_interrupt_scheme(pf); + err = i40e_init_interrupt_scheme(pf); + if (err) + goto err_switch_setup; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with @@ -9384,13 +9877,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); - msleep(75); - err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (err) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. 
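The hunk above gates the explicit link restart on the firmware version:
only pre-4.33 firmware still needs i40e_aq_set_link_restart_an() at
probe time. A minimal sketch of that cutoff as a predicate (an
illustrative helper, not one the driver defines):

	/* true if this firmware still needs the explicit link restart */
	static bool i40e_fw_needs_link_restart(struct i40e_hw *hw)
	{
		return (hw->aq.fw_maj_ver < 4) ||
		       (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 33);
	}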
@@ -9474,6 +9968,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* print a string summarizing features */ i40e_print_features(pf); @@ -9492,7 +9993,6 @@ err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); - kfree(pf->irq_pile); err_sw_init: err_adminq_setup: (void)i40e_shutdown_adminq(hw); @@ -9533,6 +10033,7 @@ static void i40e_remove(struct pci_dev *pdev) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); @@ -9559,12 +10060,6 @@ static void i40e_remove(struct pci_dev *pdev) if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); - i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - synchronize_irq(pf->msix_entries[0].vector); - free_irq(pf->msix_entries[0].vector, pf); - } - /* shutdown and destroy the HMC */ if (pf->hw.hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(&pf->hw); @@ -9597,7 +10092,6 @@ static void i40e_remove(struct pci_dev *pdev) } kfree(pf->qp_pile); - kfree(pf->irq_pile); kfree(pf->vsi); iounmap(pf->hw.hw_addr); @@ -9718,6 +10212,8 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_clear_interrupt_scheme(pf); + if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); @@ -9738,6 +10234,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); @@ -9822,6 +10320,7 @@ static int __init i40e_init_module(void) pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3e70f2e45a47..554e49d02683 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads Shadow RAM + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
**/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -212,7 +212,21 @@ read_nvm_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read @@ -222,8 +236,8 @@ read_nvm_exit: * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { i40e_status ret_code = 0; u16 index, word; @@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; - ret_code = i40e_read_nvm_word(hw, index, &data[word]); + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code) break; } @@ -243,6 +257,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. 
* @module_pointer: module pointer location in words from the NVM beginning @@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { i40e_status ret_code = 0; + struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; - u16 word = 0; - u32 i = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); @@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to PCIe Alt Auto-load module */ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) - i++; + continue; /* Skip VPD module (convert byte size to word count) */ - if (i == (u32)vpd_module) { - i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; } /* Skip PCIe ALT module (convert byte size to word count) */ - if (i == (u32)pcie_alt_module) { - i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; } - ret_code = i40e_read_nvm_word(hw, (u16)i, &word); - if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; - goto i40e_calc_nvm_checksum_exit; - } - checksum_local += word; + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); return ret_code; } @@ -679,9 +724,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, { i40e_status status; enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); +retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); @@ -725,6 +772,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, *errno = -ESRCH; break; } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. 
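+ *
+ * The gtime check below compares the free-running I40E_GLVFGEN_TIMER
+ * value against the recorded hw_semaphore_timeout, so the semaphore is
+ * only released and reacquired once it has genuinely expired.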
+ */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + i40e_status old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + return status; } @@ -741,13 +821,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, int *errno) { enum i40e_nvmupd_cmd upd_cmd; - u8 transaction, module; + u8 transaction; /* anything that doesn't match a recognized case is an error */ upd_cmd = I40E_NVMUPD_INVALID; transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); /* limits on data size */ if ((cmd->data_size < 1) || diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 68e852a96680..7b34f1e660ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, @@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse); i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); @@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); bool i40e_get_link_status(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); @@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 
offset, - u16 *data); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, @@ -299,4 +303,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index fabcfa1b45b2..a92b7725dec3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -57,7 +57,7 @@ * timespec. However, since the registers are 64 bits of nanoseconds, we must * convert the result to a timespec before we can return. **/ -static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; u32 hi, lo; @@ -69,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) ns = (((u64)hi) << 32) | lo; - *ts = ns_to_timespec(ns); + *ts = ns_to_timespec64(ns); } /** @@ -81,10 +81,10 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) * we receive a timespec from the stack, we must convert that timespec into * nanoseconds before programming the registers. **/ -static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts) +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; - u64 ns = timespec_to_ns(ts); + u64 ns = timespec64_to_ns(ts); /* The timer will not update until the high register is written, so * write the low register first. @@ -159,14 +159,14 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec now, then = ns_to_timespec(delta); + struct timespec64 now, then = ns_to_timespec64(delta); unsigned long flags; spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_read(pf, &now); - now = timespec_add(now, then); - i40e_ptp_write(pf, (const struct timespec *)&now); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); spin_unlock_irqrestore(&pf->tmreg_lock, flags); @@ -181,7 +181,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) * Read the device clock and return the correct value on ns, after converting it * into a timespec struct. **/ -static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); unsigned long flags; @@ -202,7 +202,7 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * to ns happens in the write function. 
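 *
 * timespec64 is the 2038-safe replacement for timespec here; the
 * actual conversion to a 64 bit nanosecond count is done by
 * timespec64_to_ns() inside i40e_ptp_write().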
**/ static int i40e_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); unsigned long flags; @@ -613,8 +613,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; - pf->ptp_caps.gettime = i40e_ptp_gettime; - pf->ptp_caps.settime = i40e_ptp_settime; + pf->ptp_caps.gettime64 = i40e_ptp_gettime; + pf->ptp_caps.settime64 = i40e_ptp_settime; pf->ptp_caps.enable = i40e_ptp_feature_enable; /* Attempt to register the clock before enabling the hardware. */ @@ -673,7 +673,7 @@ void i40e_ptp_init(struct i40e_pf *pf) dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", __func__); } else { - struct timespec ts; + struct timespec64 ts; u32 regval; dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, @@ -695,7 +695,7 @@ void i40e_ptp_init(struct i40e_pf *pf) i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); /* Set the clock value. */ - ts = ktime_to_timespec(ktime_get_real()); + ts = ktime_to_timespec64(ktime_get_real()); i40e_ptp_settime(&pf->ptp_caps, &ts); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 65d3c8bb2d5b..522d6df51330 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ 
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, 
I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2206d2d36f0f..4bd3a80aba82 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -25,6 +25,7 @@ 
******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40e.h" #include "i40e_prototype.h" @@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, * i40e_program_fdir_filter - Program a Flow Director filter * @fdir_data: Packet data that will be filter parameters * @raw_packet: the pre-allocated packet buffer for FDir - * @pf: The pf pointer + * @pf: The PF pointer * @add: True for add/update, False for remove **/ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, @@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", fd_data->pctype, fd_data->fd_id); @@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", rx_desc->wb.qword0.hi_dword.fd_id); + /* Check if the programming error is for ATR. + * If so, auto disable ATR and set a state for + * flush in progress. Next time we come here if flush is in + * progress do nothing, once flush is complete the state will + * be cleared. + */ + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + pf->fd_add_err++; /* store the current atr filter count */ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); + if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + } + /* filter programming failed most likely due to table full */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; /* If ATR is running fcnt_prog can quickly change, * if we are very close to full, it makes sense to disable @@ -586,6 +602,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -594,10 +624,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? 
ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; } /** @@ -606,6 +642,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); struct i40e_pf *pf = tx_ring->vsi->back; bool ret = false; @@ -623,41 +661,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending < I40E_MIN_DESC_PENDING) && - (tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { if (I40E_DEBUG_FLOW & pf->hw.debug_mask) dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", tx_pending, tx_ring->queue_index); pf->tx_sluggish_count++; } else { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** @@ -748,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -835,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -1025,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1083,6 +1124,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire 
ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1142,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers - Replace used receive buffers; packet split + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
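+ * The device writes its completion status back over these same
+ * descriptor words, so pkt_addr and hdr_addr must be rewritten on
+ * every refill even when the DMA addresses themselves are unchanged.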
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1186,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -1273,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -1404,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -1426,25 +1531,54 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (budget <= 0) return 0; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); - goto next_desc; + I40E_RX_INCREMENT(rx_ring, i); + continue; } rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -1459,40 +1593,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? 
+ skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1514,22 +1638,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1538,7 +1656,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1564,33 +1682,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) #ifdef I40E_FCOE if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { dev_kfree_skb_any(skb); - goto next_desc; + continue; } #endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers(rx_ring, cleaned_count); + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
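+ * dma_rmb() suffices here (a full rmb() is not needed) because the
+ * only ordering required is between loads from the coherent
+ * descriptor memory: the DD-bit read and the reads that follow it.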
+ */ + dma_rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1598,10 +1832,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40e_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1622,6 +1853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1641,8 +1873,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1709,6 +1947,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; + if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + /* if sampling is disabled do nothing */ if (!tx_ring->atr_sample_rate) return; @@ -1816,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. 
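+ *
+ * vlan_get_protocol() skips past the 802.1Q header and returns the
+ * encapsulated ethertype, so the checksum/TSO setup further down the
+ * transmit path sees the real L3 protocol.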
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; @@ -1832,6 +2086,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + /* Insert 802.1p priority into VLAN header */ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL)) { @@ -1852,6 +2109,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_HW_VLAN; } } + +out: *flags = tx_flags; return 0; } @@ -1976,8 +2235,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -2000,8 +2267,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; @@ -2140,6 +2407,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
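+ * For example (hypothetical sizes): a TSO skb with gso_size 9000 built
+ * from 1024-byte fragments needs more than 8 buffers to cover a single
+ * segment, so it must be linearized before transmit.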
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer @@ -2396,6 +2724,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 18b00231d2f1..4b0b8102cdc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -112,6 +120,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 @@ -151,6 +160,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -223,8 +233,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -280,7 +290,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e9901ef06a63..568e855da0f3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,6 +44,7 @@ #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -175,12 +176,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -241,6 +242,7 @@ struct i40e_hw_capabilities { u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; + u64 wr_csr_prot; }; struct i40e_mac_info { @@ -1143,7 +1145,7 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D @@ -1401,6 +1403,19 @@ struct i40e_lldp_variables { u16 crc8; }; +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 61dd1b187624..2d20af290fbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. 
- */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 40f042af4131..78d1c4ff565e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,12 +26,135 @@ #include "i40e.h" +/*********************notification routines***********************/ + +/** + * i40e_vc_vf_broadcast + * @pf: pointer to the PF structure + * @opcode: operation code + * @retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send a message to all VFs on a given PF + **/ +static void i40e_vc_vf_broadcast(struct i40e_pf *pf, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, + u16 msglen) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + } +} + +/** + * i40e_vc_notify_link_state + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + **/ +static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
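+			/* a forced-up link is reported at 40G, the XL710's maximum */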
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); +} + +/** + * i40e_vc_notify_link_state + * @pf: pointer to the PF structure + * + * send a link status message to all VFs on a given PF + **/ +void i40e_vc_notify_link_state(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + i40e_vc_notify_vf_link_state(&pf->vf[i]); +} + +/** + * i40e_vc_notify_reset + * @pf: pointer to the PF structure + * + * indicate a pending reset to all VFs on a given PF + **/ +void i40e_vc_notify_reset(struct i40e_pf *pf) +{ + struct i40e_virtchnl_pf_event pfe; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); +} + +/** + * i40e_vc_notify_vf_reset + * @vf: pointer to the VF structure + * + * indicate a pending reset to the given VF + **/ +void i40e_vc_notify_vf_reset(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + int abs_vf_id; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + return; + + abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, + sizeof(struct i40e_virtchnl_pf_event), NULL); +} /***********************misc routines*****************************/ /** * i40e_vc_disable_vf - * @pf: pointer to the pf info - * @vf: pointer to the vf info + * @pf: pointer to the PF info + * @vf: pointer to the VF info * * Disable the VF through a SW reset **/ @@ -48,38 +171,40 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) /** * i40e_vc_isvalid_vsi_id - * @vf: pointer to the vf info - * @vsi_id: vf relative vsi id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id * - * check for the valid vsi id + * check for the valid VSI id **/ -static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) +static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) { struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); - return pf->vsi[vsi_id]->vf_id == vf->vf_id; + return (vsi && (vsi->vf_id == vf->vf_id)); } /** * i40e_vc_isvalid_queue_id - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_id: vsi id * @qid: vsi relative queue id * * check for the valid queue id **/ -static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, +static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, u8 qid) { struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); - return qid < pf->vsi[vsi_id]->alloc_queue_pairs; + return (vsi && (qid < vsi->alloc_queue_pairs)); } /** * i40e_vc_isvalid_vector_id - * @vf: pointer to the vf info - * @vector_id: vf relative vector id + * @vf: pointer to the VF info + * @vector_id: VF relative vector id * * check for the valid vector id **/ @@ -94,19 +219,22 @@ static inline bool 
i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) /** * i40e_vc_get_pf_queue_id - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue id * - * return pf relative queue id + * return PF relative queue id **/ -static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, +static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, u8 vsi_queue_id) { struct i40e_pf *pf = vf->pf; - struct i40e_vsi *vsi = pf->vsi[vsi_idx]; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; + if (!vsi) + return pf_queue_id; + if (le16_to_cpu(vsi->info.mapping_flags) & I40E_AQ_VSI_QUE_MAP_NONCONTIG) pf_queue_id = @@ -120,13 +248,13 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, /** * i40e_config_irq_link_list - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as given by the FW * @vecmap: irq map info * * configure irq link list from the map **/ -static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, +static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, struct i40e_virtchnl_vector_map *vecmap) { unsigned long linklistmap = 0, tempmap; @@ -171,7 +299,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, I40E_VIRTCHNL_SUPPORTED_QTYPES)); vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); wr32(hw, reg_idx, reg); @@ -198,7 +326,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); } else { pf_queue_id = I40E_QUEUE_END_OF_LIST; @@ -220,25 +348,27 @@ irq_list_done: /** * i40e_config_vsi_tx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. 
info * * configure tx queue **/ -static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, +static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct i40e_virtchnl_txq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_txq tx_ctx; + struct i40e_vsi *vsi; u16 pf_queue_id; u32 qtx_ctl; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + vsi = i40e_find_vsi_from_id(pf, vsi_id); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); @@ -246,7 +376,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, /* only set the required fields */ tx_ctx.base = info->dma_ring_addr / 128; tx_ctx.qlen = info->ring_len; - tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); tx_ctx.rdylist_act = 0; tx_ctx.head_wb_ena = info->headwb_enabled; tx_ctx.head_wb_addr = info->dma_headwb_addr; @@ -287,14 +417,14 @@ error_context: /** * i40e_config_vsi_rx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. info * * configure rx queue **/ -static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, +static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct i40e_virtchnl_rxq_info *info) { @@ -304,7 +434,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, u16 pf_queue_id; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); @@ -378,10 +508,10 @@ error_param: /** * i40e_alloc_vsi_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @type: type of VSI to allocate * - * alloc vf vsi context & resources + * alloc VF vsi context & resources **/ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) { @@ -394,18 +524,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!vsi) { dev_err(&pf->pdev->dev, - "add vsi failed for vf %d, aq_err %d\n", + "add vsi failed for VF %d, aq_err %d\n", vf->vf_id, pf->hw.aq.asq_last_status); ret = -ENOENT; goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - vf->lan_vsi_index = vsi->idx; + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; - dev_info(&pf->pdev->dev, - "VF %d assigned LAN VSI index %d, VSI id %d\n", - vf->vf_id, vsi->idx, vsi->id); /* If the port VLAN has been configured and then the * VF driver was removed then the VSI port VLAN * configuration was destroyed. 
Check if there is @@ -446,9 +573,9 @@ error_alloc_vsi_res: /** * i40e_enable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * enable vf mappings + * enable VF mappings **/ static void i40e_enable_vf_mappings(struct i40e_vf *vf) { @@ -469,8 +596,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); /* map PF queues to VF queues */ - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); + for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); total_queue_pairs++; @@ -478,13 +605,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VSI */ for (j = 0; j < 7; j++) { - if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { + if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { reg = 0x07FF07FF; /* unused */ } else { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j * 2); reg = qid; - qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, + qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, (j * 2) + 1); reg |= qid << 16; } @@ -496,9 +623,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /** * i40e_disable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * disable vf mappings + * disable VF mappings **/ static void i40e_disable_vf_mappings(struct i40e_vf *vf) { @@ -516,9 +643,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf) /** * i40e_free_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * free vf resources + * free VF resources **/ static void i40e_free_vf_res(struct i40e_vf *vf) { @@ -528,9 +655,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) int i, msix_vf; /* free vsi & disconnect it from the parent uplink */ - if (vf->lan_vsi_index) { - i40e_vsi_release(pf->vsi[vf->lan_vsi_index]); - vf->lan_vsi_index = 0; + if (vf->lan_vsi_idx) { + i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; vf->lan_vsi_id = 0; } msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -571,9 +698,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) /** * i40e_alloc_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * allocate vf resources + * allocate VF resources **/ static int i40e_alloc_vf_res(struct i40e_vf *vf) { @@ -585,15 +712,15 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); if (ret) goto error_alloc; - total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; + total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime - * vf req validation + * VF req validation */ vf->num_queue_pairs = total_queue_pairs; - /* vf is now completely initialized */ + /* VF is now completely initialized */ set_bit(I40E_VF_STAT_INIT, &vf->vf_states); error_alloc: @@ -607,7 +734,7 @@ error_alloc: #define VF_TRANS_PENDING_MASK 0x20 /** * i40e_quiesce_vf_pci - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * * Wait for VF PCI transactions to be cleared after reset. Returns -EIO * if the transactions never clear. 
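The quiesce step documented above reduces to a bounded poll on the Transactions Pending bit (VF_TRANS_PENDING_MASK, 0x20) of the VF's PCIe Device Status register. A minimal user-space sketch of that pattern follows; read_vf_devsta() is a hypothetical stub standing in for the driver's PCI config-space read:

#include <stdbool.h>
#include <unistd.h>

#define VF_TRANS_PENDING_MASK 0x20	/* Transactions Pending bit */

/* hypothetical stub standing in for a pci_read_config_word() of the
 * VF's PCIe Device Status register; always reports idle here */
static unsigned int read_vf_devsta(void)
{
	return 0;
}

/* mirrors the shape of i40e_quiesce_vf_pci(): retry a bounded number
 * of times while transactions are pending; timeout maps to -EIO */
static bool vf_transactions_quiesced(int max_tries)
{
	while (max_tries--) {
		if (!(read_vf_devsta() & VF_TRANS_PENDING_MASK))
			return true;	/* all transactions cleared */
		usleep(1000);		/* let in-flight DMA drain */
	}
	return false;			/* caller treats this as -EIO */
}

int main(void)
{
	return vf_transactions_quiesced(100) ? 0 : 1;
}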
@@ -634,10 +761,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) /** * i40e_reset_vf - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * reset the vf + * reset the VF **/ void i40e_reset_vf(struct i40e_vf *vf, bool flr) { @@ -657,7 +784,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * just need to clean up, so don't hit the VFRTRIG register. */ if (!flr) { - /* reset vf using VPGEN_VFRTRIG reg */ + /* reset VF using VPGEN_VFRTRIG reg */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); @@ -685,6 +812,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) } } + if (flr) + usleep_range(10000, 20000); + if (!rsd) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); @@ -695,12 +825,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); /* On initial reset, we won't have any queues */ - if (vf->lan_vsi_index == 0) + if (vf->lan_vsi_idx == 0) goto complete_reset; - i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); + i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); complete_reset: - /* reallocate vf resources to reset the VSI state */ + /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); @@ -713,78 +843,10 @@ complete_reset: } /** - * i40e_enable_pf_switch_lb - * @pf: pointer to the pf structure - * - * enable switch loop back or die - no point in a return value - **/ -void i40e_enable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** - * i40e_disable_pf_switch_lb - * @pf: pointer to the pf structure - * - * disable switch loop back or die - no point in a return value - **/ -static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** * i40e_free_vfs - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * - * free vf 
resources + * free VF resources **/ void i40e_free_vfs(struct i40e_pf *pf) { @@ -797,16 +859,23 @@ void i40e_free_vfs(struct i40e_pf *pf) while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) usleep_range(1000, 2000); + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + false); + /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank * the carpet out from underneath their feet. */ if (!pci_vfs_assigned(pf->pdev)) pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); msleep(20); /* let any messages in transit get finished up */ - /* free up vf resources */ + /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { @@ -832,10 +901,6 @@ void i40e_free_vfs(struct i40e_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); } - i40e_disable_pf_switch_lb(pf); - } else { - dev_warn(&pf->pdev->dev, - "unable to disable SR-IOV because VFs are assigned.\n"); } clear_bit(__I40E_VF_DISABLE, &pf->state); } @@ -843,10 +908,10 @@ void i40e_free_vfs(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV /** * i40e_alloc_vfs - * @pf: pointer to the pf structure - * @num_alloc_vfs: number of vfs to allocate + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate * - * allocate vf resources + * allocate VF resources **/ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { @@ -883,15 +948,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - /* vf resources get allocated during reset */ + /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable vf vplan_qtable mappings */ + /* enable VF vplan_qtable mappings */ i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; - i40e_enable_pf_switch_lb(pf); err_alloc: if (ret) i40e_free_vfs(pf); @@ -905,7 +969,7 @@ err_iov: /** * i40e_pci_sriov_enable * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs **/ @@ -945,7 +1009,7 @@ err_out: /** * i40e_pci_sriov_configure * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. 
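The sysfs update mentioned above goes through the standard sriov_numvfs attribute, which the kernel routes to the driver's sriov_configure hook. A small user-space sketch of that usage; the PCI address 0000:01:00.0 is a placeholder:

#include <stdio.h>

int main(void)
{
	/* placeholder BDF; writing 0 here disables SR-IOV again */
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs", "w");

	if (!f) {
		perror("sriov_numvfs");
		return 1;
	}
	/* requesting four VFs lands in i40e_pci_sriov_configure() */
	fprintf(f, "4\n");
	fclose(f);
	return 0;
}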
@@ -970,13 +1034,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /** * i40e_vc_send_msg_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length * - * send msg to vf + * send msg to VF **/ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1025,11 +1089,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_send_resp_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @opcode: operation code * @retval: return value * - * send resp msg to vf + * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, enum i40e_virtchnl_ops opcode, @@ -1040,9 +1104,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, /** * i40e_vc_get_version_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * called from the vf to request the API version used by the PF + * called from the VF to request the API version used by the PF **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf) { @@ -1058,11 +1122,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) /** * i40e_vc_get_vf_resources_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to request its resources + * called from the VF to request its resources **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) { @@ -1090,18 +1154,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) } vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; - if (vf->lan_vsi_index) { - vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; + if (vf->lan_vsi_idx) { + vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; + pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; memcpy(vfres->vsi_res[i].default_mac_addr, vf->default_lan_addr.addr, ETH_ALEN); i++; @@ -1109,7 +1173,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); err: - /* send the response back to the vf */ + /* send the response back to the VF */ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len); @@ -1119,13 +1183,13 @@ err: /** * i40e_vc_reset_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to reset itself, - * unlike other virtchnl messages, pf driver - * doesn't send the response back to the vf + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF **/ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { @@ -1135,12 +1199,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) /** * i40e_vc_config_promiscuous_mode_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the promiscuous mode of - * vf vsis + 
* called from the VF to configure the promiscuous mode of + * VF vsis **/ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1153,21 +1217,21 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, bool allmulti = false; i40e_status aq_ret; + vsi = i40e_find_vsi_from_id(pf, info->vsi_id); if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { + (vsi->type != I40E_VSI_FCOE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } - vsi = pf->vsi[info->vsi_id]; if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) allmulti = true; aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, NULL); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); @@ -1175,11 +1239,11 @@ error_param: /** * i40e_vc_config_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the rx/tx + * called from the VF to configure the rx/tx * queues **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1221,22 +1285,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - /* set vsi num_queue_pairs in use to num configured by vf */ - pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; + /* set vsi num_queue_pairs in use to num configured by VF */ + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret); } /** * i40e_vc_config_irq_map_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the irq to + * called from the VF to configure the irq to * queue map **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1288,18 +1352,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_config_irq_link_list(vf, vsi_id, map); } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); } /** * i40e_vc_enable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to enable all or specific queue(s) + * called from the VF to enable all or specific queue(s) **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1323,21 +1387,22 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto error_param; } - if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); } /** * i40e_vc_disable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf 
to disable all or specific + * called from the VF to disable all or specific * queue(s) **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1345,7 +1410,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_virtchnl_queue_select *vqs = (struct i40e_virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; - u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { @@ -1362,22 +1426,23 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto error_param; } - if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); } /** * i40e_vc_get_stats_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to get vsi stats + * called from the VF to get vsi stats **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1400,7 +1465,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - vsi = pf->vsi[vqs->vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1409,14 +1474,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) stats = vsi->eth_stats; error_param: - /* send the response back to the vf */ + /* send the response back to the VF */ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); } /** * i40e_check_vf_permission - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @macaddr: pointer to the MAC Address being checked * * Check if the VF has permission to add or delete unicast MAC address @@ -1450,7 +1515,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) /** * i40e_vc_add_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1478,7 +1543,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) goto error_param; } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { @@ -1507,14 +1572,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, ret); } /** * i40e_vc_del_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1546,7 +1611,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) @@ -1558,14 +1623,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return 
i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, ret); } /** * i40e_vc_add_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1596,7 +1661,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1613,13 +1678,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); } /** * i40e_vc_remove_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1649,7 +1714,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1664,13 +1729,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); } /** * i40e_vc_validate_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle @@ -1776,14 +1841,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_process_vf_msg - * @pf: pointer to the pf structure - * @vf_id: source vf id + * @pf: pointer to the PF structure + * @vf_id: source VF id * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle * * called from the common aeq/arq handler to - * process request from vf + * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1801,7 +1866,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); if (ret) { - dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); return ret; } @@ -1828,6 +1893,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: ret = i40e_vc_enable_queues_msg(vf, msg, msglen); + i40e_vc_notify_vf_link_state(vf); break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: ret = i40e_vc_disable_queues_msg(vf, msg, msglen); @@ -1849,7 +1915,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, break; case I40E_VIRTCHNL_OP_UNKNOWN: default: - dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_NOT_IMPLEMENTED); @@ -1861,10 +1927,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, /** * i40e_vc_process_vflr_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * called from the vlfr irq handler to - * free up vf resources and state variables + * free up VF resources and state variables **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { @@ -1885,7 +1951,7 @@ int 
i40e_vc_process_vflr_event(struct i40e_pf *pf) for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - /* read GLGEN_VFLRSTAT register to find out the flr vfs */ + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & (1 << bit_idx)) { @@ -1901,124 +1967,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } /** - * i40e_vc_vf_broadcast - * @pf: pointer to the pf structure - * @opcode: operation code - * @retval: return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send a message to all VFs on a given PF - **/ -static void i40e_vc_vf_broadcast(struct i40e_pf *pf, - enum i40e_virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, - u16 msglen) -{ - struct i40e_hw *hw = &pf->hw; - struct i40e_vf *vf = pf->vf; - int i; - - for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* Not all vfs are enabled so skip the ones that are not */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) - continue; - - /* Ignore return value on purpose - a given VF may fail, but - * we need to keep going and send to all of them - */ - i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, - msg, msglen, NULL); - } -} - -/** - * i40e_vc_notify_link_state - * @pf: pointer to the pf structure - * - * send a link status message to all VFs on a given PF - **/ -void i40e_vc_notify_link_state(struct i40e_pf *pf) -{ - struct i40e_virtchnl_pf_event pfe; - struct i40e_hw *hw = &pf->hw; - struct i40e_vf *vf = pf->vf; - struct i40e_link_status *ls = &pf->hw.phy.link_info; - int i; - - pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; - for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - if (vf->link_forced) { - pfe.event_data.link_event.link_status = vf->link_up; - pfe.event_data.link_event.link_speed = - (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); - } else { - pfe.event_data.link_event.link_status = - ls->link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = ls->link_speed; - } - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, - 0, (u8 *)&pfe, sizeof(pfe), - NULL); - } -} - -/** - * i40e_vc_notify_reset - * @pf: pointer to the pf structure - * - * indicate a pending reset to all VFs on a given PF - **/ -void i40e_vc_notify_reset(struct i40e_pf *pf) -{ - struct i40e_virtchnl_pf_event pfe; - - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); -} - -/** - * i40e_vc_notify_vf_reset - * @vf: pointer to the vf structure - * - * indicate a pending reset to the given VF - **/ -void i40e_vc_notify_vf_reset(struct i40e_vf *vf) -{ - struct i40e_virtchnl_pf_event pfe; - int abs_vf_id; - - /* validate the request */ - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) - return; - - /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) - return; - - abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; - - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (u8 *)&pfe, - sizeof(struct i40e_virtchnl_pf_event), NULL); -} - -/** * i40e_ndo_set_vf_mac * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @mac: mac address * - * program vf mac address + * program VF mac address **/ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { @@ -2038,7 +1992,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } vf = &(pf->vf[vf_id]); - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); @@ -2083,11 +2037,11 @@ error_param: /** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @vlan_id: mac address * @qos: priority setting * - * program vf vlan id and/or qos + * program VF vlan id and/or qos **/ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) @@ -2112,7 +2066,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, } vf = &(pf->vf[vf_id]); - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); ret = -EINVAL; @@ -2196,10 +2150,10 @@ error_pvid: /** * i40e_ndo_set_vf_bw * @netdev: network interface device structure - * @vf_id: vf identifier - * @tx_rate: tx rate + * @vf_id: VF identifier + * @tx_rate: Tx rate * - * configure vf tx rate + * configure VF Tx rate **/ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) @@ -2219,13 +2173,13 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (min_tx_rate) { - dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); return -EINVAL; } vf = &(pf->vf[vf_id]); - vsi = 
pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); ret = -EINVAL; @@ -2247,7 +2201,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (max_tx_rate > speed) { - dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", max_tx_rate, vf->vf_id); ret = -EINVAL; goto error; @@ -2276,10 +2230,10 @@ error: /** * i40e_ndo_get_vf_config * @netdev: network interface device structure - * @vf_id: vf identifier - * @ivi: vf configuration structure + * @vf_id: VF identifier + * @ivi: VF configuration structure * - * return vf configuration + * return VF configuration **/ int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) @@ -2299,7 +2253,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, vf = &(pf->vf[vf_id]); /* first vsi is always the LAN vsi */ - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); ret = -EINVAL; @@ -2331,7 +2285,7 @@ error_param: /** * i40e_ndo_set_vf_link_state * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @link: required link state * * Set the link state of a specified VF, regardless of physical link state @@ -2394,7 +2348,7 @@ error_out: /** * i40e_ndo_set_vf_spoofchk * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @enable: flag to enable or disable feature * * Enable or disable VF spoof checking @@ -2423,11 +2377,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) vf->spoofchk = enable; memset(&ctxt, 0, sizeof(ctxt)); - ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid; + ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; ctxt.pf_num = pf->hw.pf_id; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); if (enable) - ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9452f5247cff..09043c1aae54 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -71,12 +71,12 @@ enum i40e_vf_capabilities { struct i40e_vf { struct i40e_pf *pf; - /* vf id in the pf space */ + /* VF id in the PF space */ u16 vf_id; - /* all vf vsis connect to the same parent */ + /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; - /* vf Port Extender (PE) stag if used */ + /* VF Port Extender (PE) stag if used */ u16 stag; struct i40e_virtchnl_ether_addr default_lan_addr; @@ -88,10 +88,10 @@ struct i40e_vf { * When assigned, these will be non-zero, because VSI 0 is always * the main LAN VSI for the PF. */ - u8 lan_vsi_index; /* index into PF struct */ + u8 lan_vsi_idx; /* index into PF struct */ u8 lan_vsi_id; /* ID as used by firmware */ - u8 num_queue_pairs; /* num of qps assigned to vf vsis */ + u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ u64 num_valid_msgs; /* num of valid msgs detected */ @@ -100,7 +100,7 @@ struct i40e_vf { unsigned long vf_states; /* vf's runtime states */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; - bool link_up; /* only valid if vf link is forced */ + bool link_up; /* only valid if VF link is forced */ bool spoofchk; }; @@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); -/* vf configuration related iplink handlers */ +/* VF configuration related iplink handlers */ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); @@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); -void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e96a80e..ef43d68f67b3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c57d4f5..39fcb1dc4ea6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -51,6 +51,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -85,46 +86,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & 
hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -535,7 +543,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 9173834825ac..58e37a44b80a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59bfea0..3cc737629bf7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK 
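
The rewritten i40evf_debug_aq above makes two changes worth noting: descriptor fields are converted with le16_to_cpu()/le32_to_cpu() before printing, so the dump reads correctly on big-endian hosts, and the indirect buffer is emitted as plain hex bytes, sixteen per line, instead of being packed into 32-bit words. A minimal sketch of that dump pattern, with a hypothetical helper name and pr_debug() standing in for i40e_debug():

static void dump_buf_hex(const u8 *buf, u16 len)
{
	char line[80];
	u16 i = 0;
	int n;

	while (i < len) {
		u16 chunk = min_t(u16, 16, len - i);

		/* offset prefix, then up to 16 " %02X" byte columns */
		n = scnprintf(line, sizeof(line), "0x%04X", i);
		while (chunk--)
			n += scnprintf(&line[n], sizeof(line) - n,
				       " %02X", buf[i++]);
		pr_debug("%s\n", line);
	}
}
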
I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 
127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 29004382f462..b077e02a0cc7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40evf.h" #include "i40e_prototype.h" @@ -126,6 +127,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -134,10 +149,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? 
+ tail - head : (tail + ring->count - head); + + return 0; } /** @@ -146,6 +167,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); bool ret = false; @@ -162,36 +185,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || - !(tx_pending < I40E_MIN_DESC_PENDING) || - !(tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** @@ -282,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -362,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -523,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -581,6 +607,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
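
i40evf_alloc_rx_headers, begun just above and continued below, allocates header buffers for the whole ring with a single coherent allocation and carves it into per-descriptor slices; the ALIGN(..., 256) stride keeps each slice inside its own cache-line-aligned region. A sketch of the carving, with hypothetical struct and function names:

struct hdr_slot {
	void		*va;	/* CPU address of this descriptor's slice */
	dma_addr_t	 pa;	/* bus address programmed into hdr_addr */
};

static int alloc_hdr_slab(struct device *dev, struct hdr_slot *slots,
			  int count, int hdr_len)
{
	int stride = ALIGN(hdr_len, 256);
	dma_addr_t pa;
	void *va;
	int i;

	va = dma_alloc_coherent(dev, (size_t)stride * count, &pa, GFP_KERNEL);
	if (!va)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		slots[i].va = va + i * stride;
		slots[i].pa = pa + i * stride;
	}
	return 0;
}

The matching teardown, visible in the i40evf_clean_rx_ring hunk earlier, frees the slab through element zero and then zeroes every slice pointer.
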
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -640,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -684,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
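
In i40evf_alloc_rx_buffers_ps above, the data half of each descriptor reuses a single page for two receives: page_offset is XOR-ed with PAGE_SIZE/2 before mapping, so successive refills alternate between the lower and upper half of the page. A reduced sketch of that flip, names illustrative:

struct half_page_buf {
	struct page	*page;
	unsigned int	 page_offset;
	dma_addr_t	 page_dma;
};

static int refill_half_page(struct device *dev, struct half_page_buf *bi)
{
	if (!bi->page) {
		bi->page = alloc_page(GFP_ATOMIC);
		if (!bi->page)
			return -ENOMEM;
		bi->page_offset = 0;
	}
	if (!bi->page_dma) {
		/* flip halves: the other half may still hold live data */
		bi->page_offset ^= PAGE_SIZE / 2;
		bi->page_dma = dma_map_page(dev, bi->page, bi->page_offset,
					    PAGE_SIZE / 2, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bi->page_dma)) {
			bi->page_dma = 0;
			return -ENOMEM;
		}
	}
	return 0;
}
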
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -771,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -825,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* bet set if the *inner* header is UDP */ - if (ipv4_tunnel && - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + if (ipv4_tunnel) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -837,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, skb->protocol == htons(ETH_P_8021AD)) ? VLAN_HLEN : 0; - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + (skb->len - + skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); - if (udp_hdr(skb)->check != csum) - goto checksum_fail; + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ } skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -900,13 +992,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -919,20 +1011,49 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u8 rx_ptype; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -947,40 +1068,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? 
+ skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1002,22 +1113,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1026,7 +1131,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1042,30 +1147,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc 
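
Both new clean loops follow the same ownership protocol, described in the comment this hunk continues: test the DD (descriptor done) bit first, then issue dma_rmb() before reading any other descriptor field, so the CPU cannot speculate loads of length/ptype words from before the device's write-back. Condensed into a helper for clarity (the helper itself is illustrative; the masks are the driver's):

static bool rx_desc_done(union i40e_rx_desc *rx_desc)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		     I40E_RXD_QW1_STATUS_SHIFT;

	if (!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return false;	/* still owned by hardware */

	/* no field written behind DD may be read before this barrier */
	dma_rmb();
	return true;
}
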
until we know the + * DD bit is set. + */ + dma_rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1073,10 +1282,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1097,6 +1303,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1116,8 +1323,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1157,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. 
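
The i40evf_napi_poll hunk above changes the completion test: each clean routine now returns the number of packets it processed, and a ring that consumed its entire budget share is assumed to have more work pending. Condensed from the hunk, under the same ring and vector shapes the driver uses:

	budget_per_ring = max(budget / q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = ring_is_ps_enabled(ring) ?
			      i40e_clean_rx_irq_ps(ring, budget_per_ring) :
			      i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		/* a full share spent means we were cut off, not finished */
		clean_complete &= (cleaned != budget_per_ring);
	}
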
In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; @@ -1173,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } +out: *flags = tx_flags; return 0; } @@ -1206,17 +1433,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - if (protocol == htons(ETH_P_IP)) { - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb_is_gso_v6(skb)) { - - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) - : ipv6_hdr(skb); + } else if (ipv6h->version == 6) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); ipv6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, @@ -1257,8 +1483,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -1274,22 +1508,23 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (tx_flags & I40E_TX_FLAGS_IPV6) { - if (tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } } /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + } else { network_hdr_len = skb_network_header_len(skb); @@ -1380,6 +1615,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } + /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
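
i40e_chk_linearize, whose kernel-doc this hunk opens and whose body follows, exists because the hardware cannot scatter-gather more than 8 buffers per packet on the wire. For TSO it estimates the worst-case buffer count any single segment could span; for the non-TSO case the rule reduces to a simple fragment count, and the transmit path flattens the skb when the limit would be exceeded. A simplified sketch of the non-TSO case plus the fallback:

#define MAX_BUFFER_TXD	8	/* hardware scatter-gather limit */

static int tx_maybe_linearize(struct sk_buff *skb)
{
	/* nr_frags plus the linear part can exceed 8 descriptors */
	if (skb_shinfo(skb)->nr_frags >= MAX_BUFFER_TXD)
		return skb_linearize(skb);	/* 0 on success, -ENOMEM */
	return 0;
}
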
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -1654,6 +1950,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4e15903b2b6d..1e49bb1fbac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
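
The comment this hunk completes explains I40E_RX_BUFFER_WRITE: returning buffers to hardware one at a time is too slow, so the clean loops above count cleaned descriptors and replenish in bundles of 16, which also limits tail-register writes to one per bundle. The pattern, extracted into an illustrative helper around the driver's own alloc routine:

static void rx_replenish_batched(struct i40e_ring *rx_ring,
				 u16 *cleaned_count)
{
	if (*cleaned_count < I40E_RX_BUFFER_WRITE)
		return;
	/* the alloc routine advances next_to_use and writes tail once */
	i40evf_alloc_rx_buffers_1buf(rx_ring, *cleaned_count);
	*cleaned_count = 0;
}
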
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -112,6 +120,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 @@ -150,6 +159,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -222,8 +232,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -277,7 +287,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaab5cc8..ec9d83a93379 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
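
The I40E_RX_INCREMENT macro added above pairs with the rewritten clean loops: those loops re-read next_to_clean at the top of every iteration instead of caching it, so the index must be wrapped and published after each descriptor. As a function, the macro amounts to this sketch:

static inline u16 rx_ring_idx_inc(struct i40e_ring *r, u16 i)
{
	if (++i == r->count)
		i = 0;
	r->next_to_clean = i;	/* published per descriptor now */
	return i;
}
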
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,7 +44,8 @@ #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ @@ -175,12 +176,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -241,6 +242,7 @@ struct i40e_hw_capabilities { u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; + u64 wr_csr_prot; }; struct i40e_mac_info { @@ -1116,7 +1118,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208138f4..59f62f0e65dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. 
This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 981224743c73..1b98c25b3092 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -225,7 +225,6 @@ struct i40evf_adapter { #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED /* flags for admin queue service task */ u32 aq_required; - u32 aq_pending; #define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) #define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) #define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) @@ -272,6 +271,8 @@ void i40evf_update_stats(struct i40evf_adapter *adapter); void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); void i40e_napi_add_all(struct i40evf_adapter *adapter); void i40e_napi_del_all(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97bac182c..f4e77665bc54 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ #include <linux/uaccess.h> - struct i40evf_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; @@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) } /** - * i40evf_get_msglevel - Set debug message level + * i40evf_set_msglevel - Set debug message level * @netdev: network interface device structure * @data: message level * @@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; adapter->msg_enable = data; } @@ -208,7 +209,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, i40evf_driver_name, 32); strlcpy(drvinfo->version, i40evf_driver_version, 32); - + strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); } @@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201c63c1..7c53aca4b5a6 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,15 +28,13 @@ #include "i40e_prototype.h" static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); -static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); -static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.0" +#define DRV_VERSION "1.2.25" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -244,6 +242,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << (i - 1))) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); } } @@ -263,6 +262,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } @@ -270,6 +270,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } @@ -524,7 +525,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &i40evf_msix_aq, 0, adapter->misc_vector_name, netdev); @@ -662,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) static struct i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct i40evf_vlan_filter *f = NULL; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + goto out; + } f = i40evf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) - return NULL; + goto clearout; f->vlan = vlan; @@ -678,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; } +clearout: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +out: return f; } @@ -689,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct 
i40evf_vlan_filter *f; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + return; + } f = i40evf_find_vlan(adapter, vlan); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; } + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } /** @@ -761,13 +783,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; + int count = 50; if (!macaddr) return NULL; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) + return NULL; + } f = i40evf_find_filter(adapter, macaddr); if (!f) { @@ -828,6 +854,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -838,8 +865,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { bool found = false; @@ -920,7 +953,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); + i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -958,6 +991,9 @@ void i40evf_down(struct i40evf_adapter *adapter) &adapter->crit_section)) usleep_range(500, 1000); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + i40evf_napi_disable_all(adapter); i40evf_irq_disable(adapter); /* remove all MAC filters */ @@ -972,7 +1008,6 @@ void i40evf_down(struct i40evf_adapter *adapter) adapter->state != __I40EVF_RESETTING) { /* cancel any current operation */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - adapter->aq_pending = 0; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this. @@ -981,15 +1016,7 @@ void i40evf_down(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; } - netif_tx_disable(netdev); - - netif_tx_stop_all_queues(netdev); - - i40evf_napi_disable_all(adapter); - - msleep(20); - netif_carrier_off(netdev); clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1307,7 +1334,6 @@ static void i40evf_watchdog_task(struct work_struct *work) */ return; } - adapter->aq_pending = 0; adapter->aq_required = 0; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; goto watchdog_done; @@ -1327,7 +1353,6 @@ static void i40evf_watchdog_task(struct work_struct *work) adapter->flags |= I40EVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); schedule_work(&adapter->reset_task); - adapter->aq_pending = 0; adapter->aq_required = 0; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; goto watchdog_done; @@ -1336,7 +1361,7 @@ static void i40evf_watchdog_task(struct work_struct *work) /* Process admin queue tasks. 
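
The VLAN and MAC filter paths in these hunks replace an unbounded spin on the __I40EVF_IN_CRITICAL_TASK bit with a bounded one: up to roughly 50 microseconds of test_and_set_bit()/udelay(1) attempts, after which the caller gives up instead of hanging in atomic context. The pattern, pulled out into a hypothetical helper:

static bool try_crit_section(unsigned long *section, int bit)
{
	int tries = 50;

	while (test_and_set_bit(bit, section)) {
		udelay(1);
		if (--tries == 0)
			return false;	/* caller must handle the miss */
	}
	return true;
}
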
After init, everything gets done * here so we don't race on the admin queue. */ - if (adapter->aq_pending) { + if (adapter->current_op) { if (!i40evf_asq_done(hw)) { dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); i40evf_send_api_ver(adapter); @@ -1344,6 +1369,11 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { + i40evf_disable_queues(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { i40evf_map_queues(adapter); goto watchdog_done; @@ -1369,11 +1399,6 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { i40evf_configure_queues(adapter); goto watchdog_done; @@ -1407,41 +1432,22 @@ restart_watchdog: } /** - * next_queue - increment to next available tx queue - * @adapter: board private structure - * @j: queue counter - * - * Helper function for RSS programming to increment through available - * queus. Returns the next queue value. - **/ -static int next_queue(struct i40evf_adapter *adapter, int j) -{ - j += 1; - - return j >= adapter->num_active_queues ? 0 : j; -} - -/** - * i40evf_configure_rss - Prepare for RSS if used + * i40evf_configure_rss - Prepare for RSS * @adapter: board private structure **/ static void i40evf_configure_rss(struct i40evf_adapter *adapter) { u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &adapter->hw; + u32 cqueue = 0; u32 lut = 0; int i, j; u64 hena; - /* No RSS for single queue. */ - if (adapter->num_active_queues == 1) { - wr32(hw, I40E_VFQF_HENA(0), 0); - wr32(hw, I40E_VFQF_HENA(1), 0); - return; - } - /* Hash type is configured by the PF - we just supply the key */ netdev_rss_key_fill(rss_key, sizeof(rss_key)); + + /* Fill out hash function seed */ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); @@ -1451,16 +1457,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. 
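
The rewritten i40evf_configure_rss below drops the old next_queue() helper and the single-queue special case; it packs four 8-bit queue indices into each 32-bit HLUT register, cycling through the active queues in order. The fill loop, isolated into a standalone sketch using the driver's register macros:

static void rss_fill_lut(struct i40e_hw *hw, u32 num_queues)
{
	u32 cqueue = 0, lut;
	int i, j;

	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
		lut = 0;
		for (j = 0; j < 4; j++) {
			/* wrap round-robin at the active queue count */
			if (cqueue == num_queues)
				cqueue = 0;
			lut |= cqueue++ << (8 * j);
		}
		wr32(hw, I40E_VFQF_HLUT(i), lut);
	}
}
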
of queues in round robin fashion */ - j = adapter->num_active_queues; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - j = next_queue(adapter, j); - lut = j; - j = next_queue(adapter, j); - lut |= j << 8; - j = next_queue(adapter, j); - lut |= j << 16; - j = next_queue(adapter, j); - lut |= j << 24; + lut = 0; + for (j = 0; j < 4; j++) { + if (cqueue == adapter->vsi_res->num_queue_pairs) + cqueue = 0; + lut |= ((cqueue) << (8 * j)); + cqueue++; + } wr32(hw, I40E_VFQF_HLUT(i), lut); } i40e_flush(hw); @@ -1481,9 +1485,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; - int i = 0, err; + struct i40evf_mac_filter *f; uint32_t rstat_val; + int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) @@ -1528,7 +1534,11 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1560,22 +1570,38 @@ static void i40evf_reset_task(struct work_struct *work) continue_reset: adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - i40evf_down(adapter); + i40evf_irq_disable(adapter); + + if (netif_running(adapter->netdev)) { + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + adapter->state = __I40EVF_RESETTING; /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) - dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) - dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); - adapter->aq_pending = 0; - adapter->aq_required = 0; i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1688,7 +1714,7 @@ out: * * Free all transmit software resources **/ -static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; @@ -1758,7 +1784,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) * * Free all receive software resources **/ -static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; @@ -1788,7 +1814,7 @@ static int i40evf_open(struct net_device *netdev) dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - if (adapter->state != __I40EVF_DOWN) + if (adapter->state != 
__I40EVF_DOWN || adapter->aq_required) return -EBUSY; /* allocate transmit descriptors */ @@ -1852,9 +1878,6 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN; i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - return 0; } @@ -1977,7 +2000,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) * * This task completes the work that was begun in probe. Due to the nature * of VF-PF communications, we may need to wait tens of milliseconds to get - * reponses back from the PF. Rather than busy-wait in probe and bog down the + * responses back from the PF. Rather than busy-wait in probe and bog down the * whole system, we'll do it in a task so we can sleep. * This task only runs during driver init. Once we've established * communications with the PF driver and set up our netdev, the watchdog @@ -2003,7 +2026,7 @@ static void i40evf_init_task(struct work_struct *work) if (err) { dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); - goto err; + goto err; } err = i40evf_check_reset_complete(hw); if (err) { @@ -2223,7 +2246,6 @@ static void i40evf_shutdown(struct pci_dev *pdev) /* Prevent the watchdog from running. */ adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; - adapter->aq_pending = 0; #ifdef CONFIG_PM pci_save_state(pdev); @@ -2368,7 +2390,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power managment resume routine + * i40evf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. @@ -2441,7 +2463,6 @@ static void i40evf_remove(struct pci_dev *pdev) /* Shut down all the garbage mashers on the detention level */ adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; - adapter->aq_pending = 0; i40evf_request_reset(adapter); msleep(20); /* If the FW isn't responding, kick it once, but only once. 
*/ @@ -2468,6 +2489,8 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); kfree(adapter->vf_res); /* If we got removed before an up/down sequence, we've got a filter diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 3f0c85ecbca6..61e090558f31 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -250,7 +250,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi++; } - adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); @@ -277,7 +276,6 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); @@ -303,7 +301,6 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); @@ -354,7 +351,6 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vimi->vecmap[v_idx].txq_map = 0; vimi->vecmap[v_idx].rxq_map = 0; - adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS; adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vimi, len); @@ -415,7 +411,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)veal, len); @@ -476,7 +471,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)veal, len); @@ -537,7 +531,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); @@ -598,7 +591,6 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); @@ -720,9 +712,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, __func__, v_retval, v_opcode); } switch (v_opcode) { - case I40E_VIRTCHNL_OP_VERSION: - /* no action, but also not an error */ - break; case I40E_VIRTCHNL_OP_GET_STATS: { struct i40e_eth_stats *stats = (struct i40e_eth_stats *)msg; @@ -740,37 +729,30 @@ void 
i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_stats = *stats; } break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER); - break; - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER); - break; - case I40E_VIRTCHNL_OP_ADD_VLAN: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER); - break; - case I40E_VIRTCHNL_OP_DEL_VLAN: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER); - break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES); /* enable transmits */ i40evf_irq_enable(adapter, true); netif_tx_start_all_queues(adapter->netdev); netif_carrier_on(adapter->netdev); break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES); - break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); break; + case I40E_VIRTCHNL_OP_VERSION: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: - adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; break; default: - dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", - v_opcode); + if (v_opcode != adapter->current_op) + dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); break; } /* switch v_opcode */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f366b3b96d03..8457d0306e3a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1776,6 +1776,7 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1797,12 +1798,9 @@ void igb_down(struct igb_adapter *adapter) } } - del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); igb_update_stats(adapter, &adapter->stats64); @@ -2095,6 +2093,7 @@ static const struct net_device_ops igb_netdev_ops = { #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_features_check = passthru_features_check, }; /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index d20fc8ed11f1..e3b9b63ad010 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -30,7 +30,7 @@ * * Neither the 82576 nor the 82580 offer registers wide enough to hold * nanoseconds time values for very long. For the 82580, SYSTIM always - * counts nanoseconds, but the upper 24 bits are not availible. The + * counts nanoseconds, but the upper 24 bits are not available. The * frequency is adjusted by changing the 32 bit fractional nanoseconds * register, TIMINCA. 
* @@ -116,7 +116,8 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) } /* SYSTIM read access for I210/I211 */ -static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) { struct e1000_hw *hw = &adapter->hw; u32 sec, nsec; @@ -134,7 +135,7 @@ static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) } static void igb_ptp_write_i210(struct igb_adapter *adapter, - const struct timespec *ts) + const struct timespec64 *ts) { struct e1000_hw *hw = &adapter->hw; @@ -269,13 +270,13 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; - struct timespec now, then = ns_to_timespec(delta); + struct timespec64 now, then = ns_to_timespec64(delta); spin_lock_irqsave(&igb->tmreg_lock, flags); igb_ptp_read_i210(igb, &now); - now = timespec_add(now, then); - igb_ptp_write_i210(igb, (const struct timespec *)&now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -283,13 +284,12 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) } static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, - struct timespec *ts) + struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; u64 ns; - u32 remainder; spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -297,14 +297,13 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, spin_unlock_irqrestore(&igb->tmreg_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, - struct timespec *ts) + struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); @@ -320,15 +319,14 @@ static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, } static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; u64 ns; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -340,7 +338,7 @@ static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, } static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); @@ -358,7 +356,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) { u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; - u32 mask[IGB_N_SDP] = { + static const u32 mask[IGB_N_SDP] = { E1000_CTRL_SDP0_DIR, E1000_CTRL_SDP1_DIR, E1000_CTRL_EXT_SDP2_DIR, @@ -373,16 +371,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -409,28 +407,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; - u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, }; - u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; - u32 ts_sdp_sel_clr[IGB_N_SDP] = { + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -468,7 +466,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; unsigned long flags; struct timespec ts; - int pin; + int pin = -1; s64 ns; switch (rq->type) { @@ -627,11 +625,12 @@ static void igb_ptp_overflow_check(struct work_struct *work) { struct igb_adapter *igb = container_of(work, struct igb_adapter, ptp_overflow_work.work); - struct timespec ts; + struct timespec64 ts; - igb->ptp_caps.gettime(&igb->ptp_caps, &ts); + igb->ptp_caps.gettime64(&igb->ptp_caps, &ts); - pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&igb->ptp_overflow_work, IGB_SYSTIM_OVERFLOW_PERIOD); @@ -989,8 +988,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82576; adapter->cc.mask = CYCLECOUNTER_MASK(64); @@ -1009,8 +1008,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = 
igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82580; adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); @@ -1038,8 +1037,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; - adapter->ptp_caps.gettime = igb_ptp_gettime_i210; - adapter->ptp_caps.settime = igb_ptp_settime_i210; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; /* Enable the timer functions by clearing bit 31. */ @@ -1057,7 +1056,7 @@ void igb_ptp_init(struct igb_adapter *adapter) /* Initialize the clock and overflow work for devices that need it. */ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec ts = ktime_to_timespec(ktime_get_real()); + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); igb_ptp_settime_i210(&adapter->ptp_caps, &ts); } else { @@ -1171,7 +1170,7 @@ void igb_ptp_reset(struct igb_adapter *adapter) /* Re-initialize the timer. */ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec ts = ktime_to_timespec(ktime_get_real()); + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); igb_ptp_write_i210(adapter, &ts); } else { diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999b1685..ae3f28332fa0 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
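The PTP hunks above swap the open-coded div_u64_rem()/multiply conversions for the kernel's ns_to_timespec64() and timespec64_to_ns() helpers, which also widen tv_sec to 64 bits for y2038 safety. A minimal userspace sketch of the same split, assuming a non-negative nanosecond count (the kernel helpers additionally handle negative values):

#include <stdint.h>
#include <stdio.h>

struct timespec64 { int64_t tv_sec; long tv_nsec; };

/* split a nanosecond count into seconds + leftover nanoseconds */
static struct timespec64 ns_to_ts64(uint64_t ns)
{
        struct timespec64 ts;

        ts.tv_sec = ns / 1000000000ULL;
        ts.tv_nsec = ns % 1000000000ULL;
        return ts;
}

/* inverse conversion, as used by the settime64 paths */
static uint64_t ts64_to_ns(const struct timespec64 *ts)
{
        return (uint64_t)ts->tv_sec * 1000000000ULL + ts->tv_nsec;
}

int main(void)
{
        struct timespec64 ts = ns_to_ts64(5000000123ULL);

        printf("%lld.%09ld -> %llu ns\n", (long long)ts.tv_sec, ts.tv_nsec,
               (unsigned long long)ts64_to_ns(&ts));
        return 0; /* prints 5.000000123 -> 5000000123 ns */
}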
@@ -29,94 +28,93 @@ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* IVAR valid bit */ -#define E1000_IVAR_VALID 0x80 +#define E1000_IVAR_VALID 0x80 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_LB 0x00040000 -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) /* Device Control */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define 
E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 /* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_MBX 15 +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 /* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87e9f61..c6996feb1cb4 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
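Among the realigned defines above, E1000_RXDEXT_ERR_FRAME_ERR_MASK ORs the CE/SE/SEQ/CXE/RXE status-error bits so the receive path can reject a damaged frame with a single test. A small standalone sketch of that usage, with the constants copied from the hunk and a made-up writeback value:

#include <stdint.h>
#include <stdio.h>

#define E1000_RXDEXT_STATERR_CE   0x01000000
#define E1000_RXDEXT_STATERR_SE   0x02000000
#define E1000_RXDEXT_STATERR_SEQ  0x04000000
#define E1000_RXDEXT_STATERR_CXE  0x10000000
#define E1000_RXDEXT_STATERR_RXE  0x80000000

#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
        E1000_RXDEXT_STATERR_CE  | \
        E1000_RXDEXT_STATERR_SE  | \
        E1000_RXDEXT_STATERR_SEQ | \
        E1000_RXDEXT_STATERR_CXE | \
        E1000_RXDEXT_STATERR_RXE)

int main(void)
{
        uint32_t staterr = E1000_RXDEXT_STATERR_SEQ; /* hypothetical writeback */

        if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
                puts("drop frame: hardware reported a link-level error");
        else
                puts("frame ok");
        return 0;
}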
@@ -36,7 +35,6 @@ #include "igbvf.h" #include <linux/if_vlan.h> - struct igbvf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, } static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { return -EOPNOTSUPP; } static void igbvf_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { } static int igbvf_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { return -EOPNOTSUPP; } @@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, static u32 igbvf_get_msglevel(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void igbvf_set_msglevel(struct net_device *netdev, u32 data) { struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) } static void igbvf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) } static int igbvf_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static int igbvf_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static void igbvf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; @@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); 
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && @@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { adapter->tx_ring->count = new_tx_count; @@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_down(adapter); - /* - * We can't just free everything and then setup again, + /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. + * to the Tx and Rx ring structs. */ if (new_tx_count != adapter->tx_ring->count) { memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); @@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_free_rx_resources(adapter->rx_ring); - memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); } err_setup: igbvf_up(adapter); @@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) } static void igbvf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) + struct ethtool_test *eth_test, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); set_bit(__IGBVF_TESTING, &adapter->state); - /* - * Link test performed before hardware reset so autoneg doesn't + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igbvf_link_test(adapter, &data[0])) @@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, } static void igbvf_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; } static int igbvf_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { return -EOPNOTSUPP; } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && - (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { adapter->current_itr = ec->rx_coalesce_usecs << 2; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); @@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { - /* - * The user's desire is to turn off interrupt throttling + /* The user's desire is to turn off interrupt throttling * altogether, but due to HW limitations, we can't do that. 
* Instead we set a very small value in EITR, which would * allow ~967k interrupts per second, but allow the adapter's @@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); - } else + } else { return -EINVAL; + } writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); @@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, static int igbvf_nway_reset(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) igbvf_reinit_locked(adapter); return 0; } - static void igbvf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) + struct ethtool_stats *stats, + u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); int i; @@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, igbvf_update_stats(adapter); for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + - igbvf_gstrings_stats[i].stat_offset; + igbvf_gstrings_stats[i].stat_offset; char *b = (char *)adapter + - igbvf_gstrings_stats[i].base_stat_offset; + igbvf_gstrings_stats[i].base_stat_offset; data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : - (*(u32 *)p - *(u32 *)b)); + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); } - } static int igbvf_get_sset_count(struct net_device *dev, int stringset) { - switch(stringset) { + switch (stringset) { case ETH_SS_TEST: return IGBVF_TEST_LEN; case ETH_SS_STATS: @@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) } static void igbvf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) + u8 *data) { u8 *p = data; int i; diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c8f889..f166baab8d7e 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
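In the igbvf_set_coalesce() hunk above, the user's rx_coalesce_usecs is shifted left by 2 to approximate EITR's 256 ns units (1 us is roughly 4 * 256 ns), and the resulting interrupt rate falls out as 1e9 / (itr * 256), which matches the /* 100000 irq/sec */ annotations on the ITR bounds in igbvf.h. A standalone check of that arithmetic, assuming those unit semantics:

#include <stdint.h>
#include <stdio.h>

#define IGBVF_MIN_ITR_USECS 10     /* ~100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000  /* ~100 irq/sec */

int main(void)
{
        uint32_t usecs = 100; /* hypothetical ethtool -C input */

        if (usecs < IGBVF_MIN_ITR_USECS || usecs > IGBVF_MAX_ITR_USECS)
                return 1; /* the driver returns -EINVAL here */

        uint32_t itr = usecs << 2;                      /* us -> 256 ns units */
        uint32_t ints_per_sec = 1000000000u / (itr * 256);

        printf("EITR=%u -> ~%u interrupts/sec\n", itr, ints_per_sec);
        return 0; /* 100 us gives EITR=400, ~9765 ints/sec */
}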
@@ -43,10 +42,10 @@ struct igbvf_info; struct igbvf_adapter; /* Interrupt defines */ -#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ -#define IGBVF_4K_ITR 980 -#define IGBVF_20K_ITR 196 -#define IGBVF_70K_ITR 56 +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 enum latency_range { lowest_latency = 0, @@ -55,56 +54,55 @@ enum latency_range { latency_invalid = 255 }; - /* Interrupt modes, as used by the IntMode parameter */ -#define IGBVF_INT_MODE_LEGACY 0 -#define IGBVF_INT_MODE_MSI 1 -#define IGBVF_INT_MODE_MSIX 2 +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 /* Tx/Rx descriptor defines */ -#define IGBVF_DEFAULT_TXD 256 -#define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 -#define IGBVF_DEFAULT_RXD 256 -#define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 -#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. + * available in host memory. + * If PTHRESH is 0, this should also be 0. * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. + * descriptors until either it has this many to write back, or the + * ITR timer expires. */ -#define IGBVF_RX_PTHRESH 16 -#define IGBVF_RX_HTHRESH 8 -#define IGBVF_RX_WTHRESH 1 +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 /* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 -#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGBVF_TX_QUEUE_WAKE 32 +#define IGBVF_TX_QUEUE_WAKE 32 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define IGBVF_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 -#define IGBVF_MNG_VLAN_NONE (-1) +#define IGBVF_MNG_VLAN_NONE (-1) /* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) enum igbvf_boards { board_vf, @@ -116,8 +114,7 @@ struct igbvf_queue_stats { u64 bytes; }; -/* - * wrappers around a pointer to a socket buffer, +/* wrappers around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct igbvf_buffer { @@ -148,10 +145,10 @@ union igbvf_desc { struct igbvf_ring { struct igbvf_adapter *adapter; /* backlink */ - union igbvf_desc *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. in ring */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ u16 next_to_use; u16 next_to_clean; @@ -202,9 +199,7 @@ struct igbvf_adapter { u32 requested_itr; /* ints/sec or adaptive */ u32 current_itr; /* Actual ITR register value, not ints/sec */ - /* - * Tx - */ + /* Tx */ struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; @@ -226,9 +221,7 @@ struct igbvf_adapter { u32 tx_fifo_size; u32 tx_dma_failed; - /* - * Rx - */ + /* Rx */ struct igbvf_ring *rx_ring; u32 rx_int_delay; @@ -249,7 +242,7 @@ struct igbvf_adapter { struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ + spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ struct e1000_hw hw; @@ -286,16 +279,16 @@ struct igbvf_adapter { }; struct igbvf_info { - enum e1000_mac_type mac; - unsigned int flags; - u32 pba; - void (*init_ops)(struct e1000_hw *); - s32 (*get_variants)(struct igbvf_adapter *); + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc9fc5d..7b6cb4c3764c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
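igbvf_get_ethtool_stats(), reindented in the ethtool.c hunk earlier, walks a table of byte offsets into the adapter struct and subtracts a base snapshot taken at reset, so one loop can serve every counter. A simplified userspace model of that offset-table pattern, handling only u64 counters; the struct and field names here are invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_adapter {           /* hypothetical stand-in for igbvf_adapter */
        uint64_t gprc, base_gprc; /* rx packet counter + reset-time snapshot */
        uint64_t gptc, base_gptc; /* tx packet counter + reset-time snapshot */
};

struct stat_desc {
        const char *name;
        size_t stat_offset;      /* offset of the live counter */
        size_t base_stat_offset; /* offset of the snapshot */
};

static const struct stat_desc stats[] = {
        { "rx_packets", offsetof(struct fake_adapter, gprc),
                        offsetof(struct fake_adapter, base_gprc) },
        { "tx_packets", offsetof(struct fake_adapter, gptc),
                        offsetof(struct fake_adapter, base_gptc) },
};

int main(void)
{
        struct fake_adapter a = { .gprc = 150, .base_gprc = 50,
                                  .gptc = 90,  .base_gptc = 10 };
        const char *p = (const char *)&a;

        for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
                uint64_t cur  = *(const uint64_t *)(p + stats[i].stat_offset);
                uint64_t base = *(const uint64_t *)(p + stats[i].base_stat_offset);

                printf("%s: %llu\n", stats[i].name,
                       (unsigned long long)(cur - base));
        }
        return 0; /* rx_packets: 100, tx_packets: 80 */
}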
@@ -54,10 +53,10 @@ out: } /** - * e1000_poll_for_ack - Wait for message acknowledgement + * e1000_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns SUCCESS if it successfully received a message acknowledgement + * returns SUCCESS if it successfully received a message acknowledgment **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw) { @@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { + E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } @@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) /* Take ownership of the buffer */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) ret_val = E1000_SUCCESS; @@ -283,7 +282,7 @@ out_no_write: } /** - * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * e1000_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -315,17 +314,18 @@ out_no_read: } /** - * e1000_init_mbx_params_vf - set initial values for vf mailbox + * e1000_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to being communications */ + * value to being communications + */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; @@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) return E1000_SUCCESS; } - diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bcb0e22..f800bf8eedae 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
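e1000_obtain_mbx_lock_vf(), touched above, claims the shared mailbox by writing E1000_V2PMAILBOX_VFU and then reading the register back: ownership is assumed only if the bit stuck, since the PF side can hold PFU concurrently. A toy model of that claim-and-verify step against a fake register; the refusal rule in write_mailbox() is an assumption standing in for the real hardware arbitration:

#include <stdint.h>
#include <stdio.h>

#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */

static uint32_t v2p_mailbox; /* stand-in for the MMIO register */

static uint32_t read_mailbox(void)
{
        return v2p_mailbox;
}

static void write_mailbox(uint32_t val)
{
        /* assumed rule: hardware ignores a VFU claim while PF holds PFU */
        if (!((val & E1000_V2PMAILBOX_VFU) &&
              (v2p_mailbox & E1000_V2PMAILBOX_PFU)))
                v2p_mailbox = val;
}

static int obtain_mbx_lock(void)
{
        write_mailbox(E1000_V2PMAILBOX_VFU);       /* try to take ownership */
        if (read_mailbox() & E1000_V2PMAILBOX_VFU) /* did the bit stick? */
                return 0;
        return -1; /* the driver returns -E1000_ERR_MBX */
}

int main(void)
{
        printf("lock attempt: %s\n", obtain_mbx_lock() ? "busy" : "acquired");
        return 0;
}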
@@ -30,44 +29,44 @@ #include "vf.h" -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* We have a total wait time of 1s for vf mailbox posted messages */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ -#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ -#define E1000_VT_MSGINFO_SHIFT 16 +#define E1000_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG 
0x0100 /* PF control message */ void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a42fdd..95af14e139d7 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); static struct igbvf_info igbvf_vf_info = { - .mac = e1000_vfadapt, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static struct igbvf_info igbvf_i350_vf_info = { - .mac = e1000_vfadapt_i350, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static const struct igbvf_info *igbvf_info_tbl[] = { - [board_vf] = &igbvf_vf_info, - [board_i350_vf] = &igbvf_i350_vf_info, + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, }; /** * igbvf_desc_unused - calculate if we have unused descriptors + * @rx_ring: address of receive ring structure **/ static int igbvf_desc_unused(struct igbvf_ring *ring) { @@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void igbvf_receive_skb(struct igbvf_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, - u32 status, u16 vlan) + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) { u16 vid; @@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, } static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, - u32 status_err, struct sk_buff *skb) + u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, * @cleaned_count: number of buffers to repopulate **/ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, - int cleaned_count) + int cleaned_count) { struct igbvf_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; @@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } buffer_info->page_dma = dma_map_page(&pdev->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, + buffer_info->page_offset, + PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->page_dma)) { @@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - bufsz, + bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(buffer_info->skb); @@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } } /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases 
this info. */ + * each write-back erases this info. + */ if (adapter->rx_ps_hdr_size) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } @@ -247,7 +247,8 @@ no_buffers: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->tail); } @@ -261,7 +262,7 @@ no_buffers: * is no guarantee that everything was cleaned **/ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, - int *work_done, int work_to_do) + int *work_done, int work_to_do) { struct igbvf_ring *rx_ring = adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, * that case, it fills the header buffer and spills the rest * into the page. */ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) + & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; @@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, buffer_info->skb = NULL; if (!adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, length); @@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (!skb_shinfo(skb)->nr_frags) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); skb_put(skb, hlen); } if (length) { dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); + buffer_info->page, + buffer_info->page_offset, + length); if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || (page_count(buffer_info->page) != 1)) @@ -370,7 +372,7 @@ send_up: skb->protocol = eth_type_trans(skb, netdev); igbvf_receive_skb(adapter, netdev, skb, staterr, - rx_desc->wb.upper.vlan); + rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error = 0; @@ -402,7 +404,7 @@ next_desc: } static void igbvf_put_txbuf(struct igbvf_adapter *adapter, - struct igbvf_buffer *buffer_info) + struct igbvf_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, * Return 0 on success, negative on failure **/ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring) + struct igbvf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, err: vfree(tx_ring->buffer_info); dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -501,7 +503,7 @@ err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; 
dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the receive descriptor ring\n"); + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } @@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { - if (adapter->rx_ps_hdr_size){ + if (adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); } buffer_info->dma = 0; @@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) if (buffer_info->page_dma) dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); put_page(buffer_info->page); buffer_info->page = NULL; @@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + rx_ring->dma); rx_ring->desc = NULL; } @@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte counts during the last + * interrupt. The advantage of per interrupt computation is faster updates + * and more accurate ITR for the current traffic pattern. Constants in this + * function were computed based on theoretical maximum wire speed and thresholds + * were set based on testing data as well as attempting to minimize response + * time while increasing bulk throughput. **/ static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, enum latency_range itr_setting, @@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); - if (new_itr != adapter->tx_ring->itr_val) { u32 current_itr = adapter->tx_ring->itr_val; - /* - * this attempts to bias the interrupt rate towards Bulk + /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > current_itr ? - min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->tx_ring->itr_val = new_itr; adapter->tx_ring->set_itr = 1; @@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) if (new_itr != adapter->rx_ring->itr_val) { u32 current_itr = adapter->rx_ring->itr_val; + new_itr = new_itr > current_itr ? 
- min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->rx_ring->itr_val = new_itr; adapter->rx_ring->set_itr = 1; @@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; + skb->len; total_packets += segs; total_bytes += bytecount; } @@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) tx_ring->next_to_clean = i; - if (unlikely(count && - netif_carrier_ok(netdev) && - igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + if (unlikely(count && netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; - /* auto mask will automatically reenable the interrupt when we write - * EICS */ + /* auto mask will automatically re-enable the interrupt when we write + * EICS + */ if (!igbvf_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(EICS, tx_ring->eims_value); @@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) #define IGBVF_NO_QUEUE -1 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) + int tx_queue, int msix_vector) { struct e1000_hw *hw = &adapter->hw; u32 ivar, index; /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ + * Each queue has a single entry in the table to which we write + * a vector number along with a "valid" bit. Sadly, the layout + * of the table is somewhat counterintuitive. + */ if (rx_queue > IGBVF_NO_QUEUE) { index = (rx_queue >> 1); ivar = array_er32(IVAR0, index); @@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, /** * igbvf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure * * igbvf_configure_msix sets up the hardware to properly * generate MSI-X interrupts. @@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) /** * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. 
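The igbvf_set_interrupt_capability() rework in the next hunk requests exactly three MSI-X vectors (Tx, Rx, PF messages) by passing min = max = 3 to pci_enable_msix_range(), and frees the entry table on failure instead of running degraded. A userspace model of that all-or-nothing policy, with the PCI call stubbed out:

#include <stdio.h>
#include <stdlib.h>

struct msix_entry { unsigned vector, entry; };

/* stub: pretend the platform could only grant 2 of the 3 vectors */
static int pci_enable_msix_range_stub(struct msix_entry *e, int min, int max)
{
        int granted = 2;

        (void)e;
        (void)max;
        return granted >= min ? granted : -1; /* real API returns -ENOSPC etc. */
}

int main(void)
{
        struct msix_entry *entries = calloc(3, sizeof(*entries));

        if (!entries)
                return 1;

        for (int i = 0; i < 3; i++)
                entries[i].entry = i; /* Tx, Rx, PF-message vectors */

        if (pci_enable_msix_range_stub(entries, 3, 3) < 0) {
                puts("MSI-X failed; free the table and fall back");
                free(entries); /* igbvf_reset_interrupt_capability() does this */
                return 1;
        }
        puts("3 vectors granted");
        free(entries);
        return 0;
}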
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) int err = -ENOMEM; int i; - /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), - GFP_KERNEL); + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(adapter->pdev, - adapter->msix_entries, 3, 3); + adapter->msix_entries, 3, 3); } if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, - "Failed to initialize MSI-X interrupts.\n"); + "Failed to initialize MSI-X interrupts.\n"); igbvf_reset_interrupt_capability(adapter); } } /** * igbvf_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure * * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. @@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) } err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); if (err) goto out; @@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); if (err) goto out; @@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_msix_other, 0, netdev->name, netdev); + igbvf_msix_other, 0, netdev->name, netdev); if (err) goto out; @@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) /** * igbvf_request_irq - initialize interrupts + * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. @@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) return err; dev_err(&adapter->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); + "Unable to allocate interrupt, Error: %d\n", err); return err; } @@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) /** * igbvf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure **/ static void igbvf_irq_disable(struct igbvf_adapter *adapter) { @@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) /** * igbvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure **/ static void igbvf_irq_enable(struct igbvf_adapter *adapter) { @@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, - "Failed to remove vlan id %d\n", vid); + "Failed to remove vlan id %d\n", vid); return -EINVAL; } clear_bit(vid, adapter->active_vlans); @@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* Turn off Relaxed Ordering on head write-backs. The writebacks * MUST be delivered in order or it will completely screw up - * our bookeeping. + * our bookkeeping. 
*/ dca_txctrl = er32(DCA_TXCTRL(0)); dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; @@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) u32 srrctl = 0; srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | - E1000_SRRCTL_BSIZEHDR_MASK | - E1000_SRRCTL_BSIZEPKT_MASK); + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; + E1000_SRRCTL_BSIZEPKT_SHIFT; if (adapter->rx_buffer_len < 2048) { adapter->rx_ps_hdr_size = 0; @@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) } else { adapter->rx_ps_hdr_size = 128; srrctl |= adapter->rx_ps_hdr_size << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } @@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and + /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; @@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) igbvf_setup_srrctl(adapter); igbvf_configure_rx(adapter); igbvf_alloc_rx_buffers(adapter->rx_ring, - igbvf_desc_unused(adapter->rx_ring)); + igbvf_desc_unused(adapter->rx_ring)); } /* igbvf_reset - bring the hardware into a known good state + * @adapter: private board structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be @@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); - return 0; } @@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rxdctl, txdctl; - /* - * signal that we're down so the interrupt handler does not + /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -1514,6 +1519,7 @@ void igbvf_down(struct igbvf_adapter *adapter) rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + netif_carrier_off(netdev); netif_stop_queue(netdev); /* disable transmits in the hardware */ @@ -1530,8 +1536,6 @@ void igbvf_down(struct igbvf_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ igbvf_update_stats(adapter); @@ -1547,7 +1551,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter) { might_sleep(); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igbvf_down(adapter); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); @@ -1662,8 +1666,7 @@ static int igbvf_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* - * before we allocate an interrupt, we must be ready to handle it. + /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
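Aside on the msleep(1) -> usleep_range(1000, 2000) conversion in igbvf_reinit_locked() above (repeated later in igbvf_change_mtu()): msleep(1) rounds up to a full jiffy, i.e. as much as 10 ms on an HZ=100 kernel, whereas usleep_range() is hrtimer based and stays close to the requested 1-2 ms. The surrounding serialization is a single-bit mutex on adapter->state; sketch of the pattern as the patch leaves it:

/* Spin on a state bit instead of taking a struct mutex; whoever wins
 * test_and_set_bit() owns the reset until clear_bit().
 */
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
	usleep_range(1000, 2000);	/* back off 1-2 ms */
igbvf_down(adapter);
igbvf_up(adapter);
clear_bit(__IGBVF_RESETTING, &adapter->state);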
@@ -1725,6 +1728,7 @@ static int igbvf_close(struct net_device *netdev) return 0; } + /** * igbvf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -1753,15 +1757,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return 0; } -#define UPDATE_VF_COUNTER(reg, name) \ - { \ - u32 current_counter = er32(reg); \ - if (current_counter < adapter->stats.last_##name) \ - adapter->stats.name += 0x100000000LL; \ - adapter->stats.last_##name = current_counter; \ - adapter->stats.name &= 0xFFFFFFFF00000000LL; \ - adapter->stats.name |= current_counter; \ - } +#define UPDATE_VF_COUNTER(reg, name) \ +{ \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ +} /** * igbvf_update_stats - Update the board statistics counters @@ -1772,8 +1776,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - /* - * Prevent stats update while adapter is being reset, link is down + /* Prevent stats update while adapter is being reset, link is down * or if the pci connection is down. */ if (adapter->link_speed == 0) @@ -1832,7 +1835,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) **/ static void igbvf_watchdog(unsigned long data) { - struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -1841,8 +1844,8 @@ static void igbvf_watchdog(unsigned long data) static void igbvf_watchdog_task(struct work_struct *work) { struct igbvf_adapter *adapter = container_of(work, - struct igbvf_adapter, - watchdog_task); + struct igbvf_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -1855,8 +1858,8 @@ static void igbvf_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); igbvf_print_link_info(adapter); netif_carrier_on(netdev); @@ -1876,10 +1879,9 @@ static void igbvf_watchdog_task(struct work_struct *work) igbvf_update_stats(adapter); } else { tx_pending = (igbvf_desc_unused(tx_ring) + 1 < - tx_ring->count); + tx_ring->count); if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, + /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
@@ -1898,15 +1900,15 @@ static void igbvf_watchdog_task(struct work_struct *work) round_jiffies(jiffies + (2 * HZ))); } -#define IGBVF_TX_FLAGS_CSUM 0x00000001 -#define IGBVF_TX_FLAGS_VLAN 0x00000002 -#define IGBVF_TX_FLAGS_TSO 0x00000004 -#define IGBVF_TX_FLAGS_IPV4 0x00000008 -#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, __be16 protocol) { @@ -1930,17 +1932,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); } i = tx_ring->next_to_use; @@ -1984,7 +1987,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, } static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol) { @@ -2005,8 +2008,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) info |= (skb_transport_header(skb) - - skb_network_header(skb)); - + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(info); @@ -2055,6 +2057,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. 
+ */ smp_mb(); /* We need to check again just in case room has been made available */ @@ -2067,11 +2073,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) return 0; } -#define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb) { struct igbvf_buffer *buffer_info; @@ -2093,7 +2099,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { const struct skb_frag_struct *frag; @@ -2111,7 +2116,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; } @@ -2133,7 +2138,7 @@ dma_error: /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -2144,10 +2149,10 @@ dma_error: } static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, - u8 hdr_len) + u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; struct igbvf_buffer *buffer_info; @@ -2155,7 +2160,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, unsigned int i; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + E1000_ADVTXD_DCMD_DEXT); if (tx_flags & IGBVF_TX_FLAGS_VLAN) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; @@ -2182,7 +2187,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); + cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) @@ -2193,14 +2198,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). 
+ */ wmb(); tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ + * at a time, it synchronizes IO on IA64/Altix systems + */ mmiowb(); } @@ -2225,11 +2232,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - /* - * need: count + 4 desc gap to keep tail from touching - * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, - * + 1 desc for context descriptor, + /* need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, * head, otherwise try next time */ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { @@ -2258,11 +2264,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; - /* - * count reflects descriptors mapped, if 0 then mapping error + /* count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ count = igbvf_tx_map_adv(adapter, tx_ring, skb); @@ -2313,6 +2318,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) static void igbvf_reset_task(struct work_struct *work) { struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); igbvf_reinit_locked(adapter); @@ -2356,14 +2362,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igbvf_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; if (netif_running(netdev)) igbvf_down(adapter); - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab @@ -2382,15 +2387,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = PAGE_SIZE / 2; #endif - /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + - ETH_FCS_LEN; + ETH_FCS_LEN; dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -2477,8 +2481,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ @@ -2503,7 +2506,7 @@ static void igbvf_netpoll(struct net_device *netdev) * this device has been detected. 
*/ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2583,7 +2586,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) } static int igbvf_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2596,21 +2599,21 @@ static int igbvf_set_features(struct net_device *netdev, } static const struct net_device_ops igbvf_netdev_ops = { - .ndo_open = igbvf_open, - .ndo_stop = igbvf_close, - .ndo_start_xmit = igbvf_xmit_frame, - .ndo_get_stats = igbvf_get_stats, - .ndo_set_rx_mode = igbvf_set_multi, - .ndo_set_mac_address = igbvf_set_mac, - .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, - .ndo_tx_timeout = igbvf_tx_timeout, - .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igbvf_netpoll, + .ndo_poll_controller = igbvf_netpoll, #endif - .ndo_set_features = igbvf_set_features, + .ndo_set_features = igbvf_set_features, }; /** @@ -2645,8 +2648,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -2686,7 +2689,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + pci_resource_len(pdev, 0)); if (!adapter->hw.hw_addr) goto err_ioremap; @@ -2712,16 +2715,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -2742,7 +2745,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pdev->dev, "Error reading MAC address.\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) - dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -2751,11 +2755,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); memcpy(adapter->hw.mac.addr, netdev->dev_addr, - netdev->addr_len); + netdev->addr_len); } 
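For context, the consolidated "No usable DMA configuration" dev_err() in igbvf_probe() above is the tail of the standard two-step DMA-mask negotiation; a sketch under the assumption, consistent with the pci_using_dac use later in probe, that the 64-bit mask is tried first:

/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask, and abort
 * the probe if neither can be satisfied.
 */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
	pci_using_dac = 1;		/* enables NETIF_F_HIGHDMA below */
} else {
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_dma;
	}
}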
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long) adapter); + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); @@ -2818,8 +2822,7 @@ static void igbvf_remove(struct pci_dev *pdev) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* - * The watchdog timer may be rescheduled, so explicitly + /* The watchdog timer may be rescheduled, so explicitly * disable it from being rescheduled. */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -2832,9 +2835,8 @@ static void igbvf_remove(struct pci_dev *pdev) igbvf_reset_interrupt_capability(adapter); - /* - * it is important to delete the napi struct prior to freeing the - * rx ring so that you do not end up with null pointer refs + /* it is important to delete the NAPI struct prior to freeing the + * Rx ring so that you do not end up with null pointer refs */ netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); @@ -2866,17 +2868,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { - .name = igbvf_driver_name, - .id_table = igbvf_pci_tbl, - .probe = igbvf_probe, - .remove = igbvf_remove, + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = igbvf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, + .suspend = igbvf_suspend, + .resume = igbvf_resume, #endif - .shutdown = igbvf_shutdown, - .err_handler = &igbvf_err_handler + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler }; /** @@ -2888,6 +2890,7 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); pr_info("%s\n", igbvf_copyright); @@ -2909,7 +2912,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); - MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341715dc..86a7c120b574 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,81 +27,81 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -/* - * Convenience macros +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ + +/* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) /* Statistics registers */ -#define E1000_VFGPRC 0x00F10 -#define E1000_VFGORC 0x00F18 -#define E1000_VFMPRC 0x00F3C -#define E1000_VFGPTC 0x00F14 -#define E1000_VFGOTC 0x00F34 -#define E1000_VFGOTLBC 0x00F50 -#define E1000_VFGPTLBC 0x00F44 -#define E1000_VFGORLBC 0x00F48 -#define E1000_VFGPRLBC 0x00F40 +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 /* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) /* Define macros for handling registers */ -#define er32(reg) readl(hw->hw_addr + E1000_##reg) -#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) #define array_er32(reg, offset) \ readl(hw->hw_addr + E1000_##reg + (offset << 2)) #define array_ew32(reg, offset, val) \ writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) -#define e1e_flush() er32(STATUS) +#define e1e_flush() er32(STATUS) #endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c2c534..a13baa90ae20 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,17 +24,16 @@ *******************************************************************************/ - #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, - u32, u32, u32); + u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); @@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) * the status register's data which is often stale and inaccurate. 
**/ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { s32 status; @@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) u8 *addr = (u8 *)(&msgbuf[1]); u32 ctrl; - /* assert vf queue/interrupt reset */ + /* assert VF queue/interrupt reset */ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_RST); @@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* mailbox timeout can now become active */ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; - /* notify pf of vf reset completion */ + /* notify PF of VF reset completion */ msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); @@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) memcpy(hw->mac.perm_addr, addr, ETH_ALEN); else ret_val = -E1000_ERR_MAC_INIT; @@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* - * The bit_shift is the number of left-shifts + /* The bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16)mc_addr[5]) << bit_shift))); return hash_value; } @@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * unless there are workarounds that change this. **/ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[E1000_VFMAILBOX_SIZE]; @@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) * @addr: pointer to the receive address * @index: receive address array register **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; @@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; u32 in_msg = 0; - /* - * We only want to run this if there has been a rst asserted. + /* We only want to run this if there has been a rst asserted. 
* in this case that could mean a link change, device reset, * or a virtual function reset */ @@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) if (!mac->get_link_status) goto out; - /* if link status is down no point in checking to see if pf is up */ + /* if link status is down no point in checking to see if PF is up */ if (!(er32(STATUS) & E1000_STATUS_LU)) goto out; /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; /* if incoming message isn't clear to send we are waiting on response */ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { - /* message is not CTS and is NACK we must have lost CTS status */ + /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & E1000_VT_MSGTYPE_NACK) ret_val = -E1000_ERR_MAC_INIT; goto out; } - /* the pf is talking, if we timed out in the past we reinit */ + /* the PF is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -E1000_ERR_MAC_INIT; goto out; } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c68dfcd..0f1eca639f68 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -38,30 +37,29 @@ struct e1000_hw; -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 -/* - * Receive Address Register Count +/* Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. 
*/ -#define E1000_RAR_ENTRIES_VF 1 +#define E1000_RAR_ENTRIES_VF 1 /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -69,53 +67,53 @@ union e1000_adv_rx_desc { u32 data; struct { u16 pkt_info; /* RSS/Packet type */ - u16 hdr_info; /* Split Header, - * hdr buffer length */ + /* Split Header, hdr buffer length */ + u16 hdr_info; } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + u32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { - u64 buffer_addr; /* Address of descriptor's data buf */ + u64 buffer_addr; /* Address of descriptor's data buf */ u32 cmd_type_len; u32 olinfo_status; } read; struct { - u64 rsvd; /* Reserved */ + u64 rsvd; /* Reserved */ u32 nxtseq_seed; u32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { u32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define 
E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ enum e1000_mac_type { e1000_undefined = 0, @@ -262,5 +260,4 @@ struct e1000_hw { void e1000_rlpml_set_vf(struct e1000_hw *, u16); void e1000_init_function_pointers_vf(struct e1000_hw *hw); - #endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 11a1bdbe3fd9..31f91459312f 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -285,6 +285,8 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) /* prevent the interrupt handler from restarting watchdog */ set_bit(__IXGB_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); /* waiting for NAPI to complete can re-enable interrupts */ ixgb_irq_disable(adapter); @@ -298,7 +300,6 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); netif_stop_queue(netdev); ixgb_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7dcbbec09a70..636f9e350162 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -151,6 +151,7 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; + bool rss_query_enabled; unsigned int vf_api; }; @@ -613,7 +614,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) #define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7) #define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) #define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) @@ -643,7 +643,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -723,6 +722,8 @@ struct ixgbe_adapter { u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; + u16 bridge_mode; + u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; @@ -766,6 +767,15 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + +/* maximum number of RETA entries among all devices supported by ixgbe + * driver: currently it's x550 device in non-SRIOV mode + */ +#define IXGBE_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; + +#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -955,4 +965,5 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index c5c97b483d7c..824a7ab79ab6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw 
function. - * Disables relaxed ordering Then set pcie completion timeout + * Disables relaxed ordering for archs other than SPARC + * Then set pcie completion timeout * **/ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { +#ifndef CONFIG_SPARC u32 regval; u32 i; +#endif s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { @@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif if (ret_val) return ret_val; @@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index cf55a0df877b..e0c363948bf4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) */ hw->mac.ops.disable_rx_buff(hw); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); hw->mac.ops.enable_rx_buff(hw); @@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, .prot_autoc_write = &prot_autoc_write_82599, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9c66babd4edd..06d8f3cfa099 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; - u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { @@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif return 0; } @@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = true; /* Disable the receive unit */ - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); + hw->mac.ops.disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 
regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); return 0; } @@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) return 0; } +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 8cfadcb2676e..f21f8a165ec4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5be0dd508de..eafa9ec802ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1351,7 +1351,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; - return 1; + return true; } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { before = ixgbe_read_reg(&adapter->hw, reg); @@ -1376,7 +1376,7 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; - return 1; + return true; } before = ixgbe_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, write & mask); @@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) /* shut down the DMA engines now so they can be reinitialized later */ /* first Rx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - reg_ctl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); + hw->mac.ops.disable_rx(hw); ixgbe_disable_rx_queue(adapter, rx_ring); /* now Tx */ @@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; @@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) goto err_nomem; } - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_configure_rx_ring(adapter, rx_ring); - rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + hw->mac.ops.enable_rx(hw); + return 0; err_nomem: @@ -2852,6 +2853,45 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ixgbe_rss_indir_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return ixgbe_rss_indir_tbl_entries(adapter); +} + +static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ixgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + + return 0; +} + static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -3109,6 +3149,9 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 2ad91cb04dab..631c603fc966 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_hw *hw; u32 fcbuff; if (!netdev) @@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!ddp->udl) return 0; + hw = &adapter->hw; len = ddp->len; - /* if there an error, force to invalidate ddp context */ - if (ddp->err) { + /* if no error then skip ddp context invalidation */ + if (!ddp->err) + goto skip_ddpinv; + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); + } else { + /* other hardware requires DDP FCoE lock */ spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_WE)); /* guaranteed to be invalidated after 100us */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); spin_unlock_bh(&fcoe->lock); - if (fcbuff & IXGBE_FCBUFF_VALID) - udelay(100); - } + } + + if (fcbuff & IXGBE_FCBUFF_VALID) + usleep_range(100, 150); + +skip_ddpinv: if (ddp->sgl) dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); @@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, /* program DMA context */ hw = &adapter->hw; - spin_lock_bh(&fcoe->lock); /* turn on last frame indication for target mode as FCP_RSPtarget is * supposed to send FCP_RSP when it is done. */ @@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } - IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); - /* program filter context */ - IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), + ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); + } else { + /* DDP lock for indirect DDP context access */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); - spin_unlock_bh(&fcoe->lock); + spin_unlock_bh(&fcoe->lock); + } return 1; @@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, struct fcoe_crc_eof *crc; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; + int ddp_max; u32 fctl; u16 xid; @@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else xid = be16_to_cpu(fh->fh_rx_id); - if (xid >= IXGBE_FCOE_DDP_MAX) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) return -EINVAL; fcoe = &adapter->fcoe; @@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_q, fcoe_i; + int i, fcoe_q, fcoe_i, fcoe_q_h = 0; + int fcreta_size; u32 etqf; /* Minimal functionality for FCoE requires at least CRC offloads */ @@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) return; /* 
Use one or more Rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + fcreta_size = IXGBE_FCRETA_SIZE; + if (adapter->hw.mac.type == ixgbe_mac_X550) + fcreta_size = IXGBE_FCRETA_SIZE_X550; + + for (i = 0; i < fcreta_size; i++) { + if (adapter->hw.mac.type == ixgbe_mac_X550) { + int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % + fcoe->indices); + fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; + fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & + IXGBE_FCRETA_ENTRY_HIGH_MASK; + } + fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + fcoe_q |= fcoe_q_h; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); @@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; - int cpu, i; + int cpu, i, ddp_max; /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return; - for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + + for (i = 0; i < ddp_max; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); for_each_possible_cpu(cpu) @@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 0772b7730fce..38385876effb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -46,6 +46,7 @@ #define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ #define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ +#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ /* Default traffic class to use for FCoE */ #define IXGBE_FCOE_DEFTC 3 @@ -77,7 +78,7 @@ struct ixgbe_fcoe { struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; atomic_t refcnt; spinlock_t lock; - struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; void *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; unsigned long mode; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 70cc4c5c0a01..d3f4b0ceb3f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = q_vector->adapter; - if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); - else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); else - netif_rx(skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -2609,7 +2605,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidently clear them if + * which should be masked here in order to not accidentally clear them 
if * the bits are high when ixgbe_msix_other is called. There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering @@ -3232,89 +3228,148 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 128; + else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 512; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ixgbe_store_reta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0; - int i, j; - int reta_entries = 128; - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - int indices_multi; - - /* - * Program table for at least 2 queues w/ SR-IOV so that VFs can - * make full use of any rings they may have. We will use the - * PSRTYPE register to control how many rings we use within the PF. - */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) - rss_i = 2; - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + u32 indices_multi; + u8 *indir_tbl = adapter->rss_indir_tbl; /* Fill out the redirection table as follows: - * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices - * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index - * X550: 512 (8 bit wide) entries containing 6 bit RSS index + * - 82598: 8 bit wide entries containing pair of 4 bit RSS + * indices. + * - 82599/X540: 8 bit wide entries containing 4 bit RSS index + * - X550: 8 bit wide entries containing 6 bit RSS index */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) indices_multi = 0x11; else indices_multi = 0x1; - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - reta_entries = 512; - default: - break; - } - - /* Fill out redirection table */ - for (i = 0, j = 0; i < reta_entries; i++, j++) { - if (j == rss_i) - j = 0; - reta = (reta << 8) | (j * indices_multi); + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) == 3) { if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); + reta = 0; } } } -static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * Write the RETA table to HW (for x550 devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
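The packing loop in ixgbe_store_reta() above is dense, so here is the same byte-lane arithmetic as a self-contained user-space sketch. reta_write() is only a stand-in for IXGBE_WRITE_REG() (the real code targets RETA for the first 128 entries and ERETA past that), and the 8-entry table is purely for demonstration:

        #include <stdint.h>
        #include <stdio.h>

        /* Stand-in for IXGBE_WRITE_REG(); prints instead of touching MMIO. */
        static void reta_write(unsigned int reg, uint32_t val)
        {
                printf("RETA[%u] = 0x%08x\n", reg, (unsigned int)val);
        }

        /* Entry i lands in byte lane (i & 3); every fourth entry flushes
         * one 32-bit register, exactly as in ixgbe_store_reta().
         */
        static void store_reta(const uint8_t *tbl, unsigned int entries,
                               uint32_t indices_multi)
        {
                uint32_t reta = 0;
                unsigned int i;

                for (i = 0; i < entries; i++) {
                        reta |= indices_multi * tbl[i] << (i & 0x3) * 8;
                        if ((i & 3) == 3) {
                                reta_write(i >> 2, reta);
                                reta = 0;
                        }
                }
        }

        int main(void)
        {
                uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };

                store_reta(tbl, 8, 0x1);  /* 82599/X540: 0x03020100 twice */
                store_reta(tbl, 8, 0x11); /* 82598: 0x33221100 twice */
                return 0;
        }

The 82598 multiplier 0x11 mirrors a 4-bit queue index into both nibbles of its byte, which is what the comment means by entries "containing pair of 4 bit RSS indices".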
+ */ +static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} + +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_reta(adapter); +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + adapter->rss_key[i]); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | j; - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), - vfreta); + + adapter->rss_indir_tbl[i] = j; } + + ixgbe_store_vfreta(adapter); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 mrqc = 0, rss_field = 0, vfmrqc = 0; - u32 rss_key[10]; u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ @@ -3358,7 +3413,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(rss_key, sizeof(rss_key)); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -3368,12 +3423,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* Setup RSS through the VF registers */ - ixgbe_setup_vfreta(adapter, rss_key); + ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); } else { - ixgbe_setup_reta(adapter, rss_key); + ixgbe_setup_reta(adapter); mrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } @@ -3557,7 +3612,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + 
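Before the store helpers run, ixgbe_setup_reta() above fills the cached table round-robin: entry i gets i modulo rss_i, so with rss_i = 4 the table reads 0, 1, 2, 3, 0, 1, 2, 3, ... and hash buckets spread evenly over the Rx rings. A minimal sketch of that fill:

        /* Round-robin assignment of hash buckets to Rx rings, as done by
         * ixgbe_setup_reta()/ixgbe_setup_vfreta() above.
         */
        static void fill_indir_tbl(uint8_t *tbl, unsigned int entries,
                                   unsigned int rss_i)
        {
                unsigned int i, j;

                for (i = 0, j = 0; i < entries; i++, j++) {
                        if (j == rss_i)
                                j = 0;
                        tbl[i] = j; /* Rx ring that hash bucket i maps to */
                }
        }

Keeping the result in adapter->rss_indir_tbl, instead of recomputing it on the fly as the deleted code did, is what lets the new ethtool get_rxfh hook and the VF mailbox handlers later in this diff report the live table.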
if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ @@ -3603,6 +3658,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* enable ethertype anti spoofing if hw supports it */ if (hw->mac.ops.set_ethertype_anti_spoofing) hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + + /* Enable/Disable RSS query feature */ + ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, + adapter->vfinfo[i].rss_query_enabled); } } @@ -3705,8 +3764,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); @@ -3731,6 +3789,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* disable drop enable for 82598 parts */ if (hw->mac.type == ixgbe_mac_82598EB) rxctrl |= IXGBE_RXCTRL_DMBYPS; @@ -3924,7 +3983,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.num_rar_entries; i++) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; } ixgbe_sync_mac_table(adapter); @@ -3992,7 +4051,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) adapter->mac_table[i].queue == queue) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; ixgbe_sync_mac_table(adapter); return 0; @@ -5014,7 +5073,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *upper; struct list_head *iter; - u32 rxctrl; int i; /* signal that we are down to the interrupt handler */ @@ -5022,8 +5080,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) return; /* do nothing if already down */ /* disable receives */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -6174,7 +6231,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /* Cause software interrupt to ensure rings are cleaned */ ixgbe_irq_rearm_queues(adapter, eics); - } /** @@ -7507,14 +7563,9 @@ static void ixgbe_netpoll(struct net_device *netdev) if (test_bit(__IXGBE_DOWN, &adapter->state)) return; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); - } else { - ixgbe_intr(adapter->pdev->irq, netdev); - } - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; + /* loop through and schedule all active queues */ + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); } #endif @@ -7882,6 +7933,80 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr 
*tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +/** + * ixgbe_configure_bridge_mode - set various bridge modes + * @adapter - the private structure + * @mode - requested bridge mode + * + * Configure some settings require for various bridge modes. + **/ +static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, + __u16 mode) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p, num_pools; + u32 vmdctl; + + switch (mode) { + case BRIDGE_MODE_VEPA: + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + break; + case BRIDGE_MODE_VEB: + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, + IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + break; + default: + return -EINVAL; + } + + adapter->bridge_mode = mode; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return 0; +} + static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { @@ -7897,8 +8022,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { + u32 status; __u16 mode; - u32 reg = 0; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; @@ -7907,19 +8032,11 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; mode = nla_get_u16(attr); - if (mode == BRIDGE_MODE_VEPA) { - reg = 0; - adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else if (mode == BRIDGE_MODE_VEB) { - reg = IXGBE_PFDTXGSWC_VT_LBEN; - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else - return -EINVAL; + status = ixgbe_configure_bridge_mode(adapter, mode); + if (status) + return status; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); - - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + break; } return 0; @@ -7930,17 +8047,12 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, u32 filter_mask) { struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 mode; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) - mode = BRIDGE_MODE_VEB; - else - mode = BRIDGE_MODE_VEPA; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -8052,6 +8164,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB @@ -8406,7 +8519,6 @@ skip_sriov: NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | @@ -8428,6 +8540,7 @@ skip_sriov: } netdev->hw_features |= NETIF_F_RXALL; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; @@ -8989,8 +9102,6 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); - - rcu_barrier(); /* Wait for completion of call_rcu()'s */ } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index a5cb755de3a9..b1e4703ff2a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -73,6 +73,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -97,6 +98,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 79c00f57d3e7..e5ba04025e2b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -279,20 +279,18 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. 
*/ -static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); u64 ns; - u32 remainder; unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -306,15 +304,14 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * wall timer value. */ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); u64 ns; unsigned long flags; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -407,7 +404,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); - struct timespec ts; + struct timespec64 ts; if (timeout) { ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); @@ -488,7 +485,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware - * timestamp has been taken for the current skb. It is necesary, because the + * timestamp has been taken for the current skb. It is necessary, because the * descriptor's "done" bit does not correlate with the timestamp event. 
*/ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) @@ -874,8 +871,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime = ixgbe_ptp_gettime; - adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; case ixgbe_mac_82599EB: @@ -890,8 +887,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime = ixgbe_ptp_gettime; - adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f37fe7269a7..1d17b5872dd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -36,6 +36,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/if_bridge.h> #ifdef NETIF_F_HW_VLAN_CTAG_TX #include <linux/if_vlan.h> #endif @@ -79,7 +80,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + adapter->bridge_mode = BRIDGE_MODE_VEB; /* If call to enable VFs succeeded then allocate memory * for per VF control structures. @@ -105,9 +106,18 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); - /* enable spoof checking for all VFs */ - for (i = 0; i < adapter->num_vfs; i++) + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. + */ + adapter->vfinfo[i].rss_query_enabled = 0; + } + return 0; } @@ -141,7 +151,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the - * physical function. If the user requests greater thn + * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. 
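Note the default chosen above: rss_query_enabled starts out false for every VF, because 82599 and X540 share one RSS key and indirection table between the PF and its VFs, so answering a VF's query necessarily exposes PF state. The administrator opts in per VF through the new ndo_set_vf_rss_query_en hook; with a new enough iproute2 that is roughly "ip link set dev <ifname> vf <num> query_rss on", where <ifname> and <num> are placeholders. On X550 the hook returns -EOPNOTSUPP, since that hardware gives each pool its own PFVFRSSRK/PFVFRETA registers and the VF has nothing to ask the PF for.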
*/ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); @@ -424,6 +434,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) #endif /* CONFIG_FCOE */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -891,6 +902,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, switch (api) { case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -914,6 +926,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: break; default: return -1; @@ -941,6 +954,53 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 i, j; + u32 *out_buf = &msgbuf[1]; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + /* Check if operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + /* This mailbox command is supported (required) only for 82599 and x540 + * VFs which support up to 4 RSS queues. Therefore we will compress the + * RETA by saving only 2 bits from each entry. This way we will be able + * to transfer the whole RETA in a single mailbox operation. + */ + for (i = 0; i < reta_size / 16; i++) { + out_buf[i] = 0; + for (j = 0; j < 16; j++) + out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + + return 0; +} + +static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *rss_key = &msgbuf[1]; + + /* Check if the operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -997,6 +1057,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_RETA: + retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RSS_KEY: + retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1330,6 +1396,26 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* This operation is currently supported only for 82599 and x540 + * devices. 
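The 2-bit squeeze in ixgbe_get_vf_reta() works because an API-1.2 VF has at most 4 RSS queues, so each of the 64 entries it cares about needs only 2 bits and the whole table fits in 64 * 2 / 32 = 4 mailbox words. A round-trip sketch (plain C; the expand side approximates what the VF driver does with the reply):

        #include <stdint.h>

        /* PF side: pack sixteen 2-bit entries into each 32-bit word. */
        static void compress_reta(const uint8_t *reta, uint32_t *buf,
                                  unsigned int entries)
        {
                unsigned int i, j;

                for (i = 0; i < entries / 16; i++) {
                        buf[i] = 0;
                        for (j = 0; j < 16; j++)
                                buf[i] |= (uint32_t)(reta[16 * i + j] & 0x3)
                                          << (2 * j);
                }
        }

        /* VF side: entry i sits at bit pair 2 * (i % 16) of word i / 16. */
        static void expand_reta(const uint32_t *buf, uint8_t *reta,
                                unsigned int entries)
        {
                unsigned int i;

                for (i = 0; i < entries; i++)
                        reta[i] = (buf[i / 16] >> (2 * (i % 16))) & 0x3;
        }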
+ */ + if (adapter->hw.mac.type < ixgbe_mac_82599EB || + adapter->hw.mac.type >= ixgbe_mac_X550) + return -EOPNOTSUPP; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1343,5 +1429,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 32c26d586c01..2c197e6d1fe7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -47,6 +47,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fc5ecee56ca8..dd6ba5916dfe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -285,6 +285,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 #define IXGBE_VT_CTL 0x051B0 #define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ #define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ @@ -608,6 +610,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RTTBCNRM 0x04980 #define IXGBE_RTTQCNRM 0x04980 +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) /* FCoE DMA Context Registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ #define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ @@ -634,6 +638,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ #define IXGBE_REOFF 0x05158 /* Rx FC EOF */ #define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) /* FCoE Filter Context Registers */ #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ @@ -664,6 +671,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ #define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ #define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 /* Stats registers */ #define IXGBE_CRCERRS 0x04000 @@ -1690,7 +1701,7 @@ enum { #define IXGBE_MACC_FS 0x00040000 #define IXGBE_MAC_RX2TX_LPBK 0x00000002 -/* Veto Bit definiton */ +/* Veto Bit definition */ #define IXGBE_MMNGC_MNG_VETO 0x00000001 /* LINKS Bit Masks */ @@ -2462,8 +2473,8 @@ struct ixgbe_hic_read_shadow_ram { struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr; - u32 address; - u16 length; + __be32 address; + __be16 length; u16 pad2; u16 data; u16 pad3; @@ -3067,6 +3078,10 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); /* DMA Coalescing */ @@ -3137,6 +3152,7 @@ struct ixgbe_mac_info { u8 flags; u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; }; struct ixgbe_phy_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 49395420c9b3..f5f948d08b43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 50bf81908dd6..cf5cf819a6b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) return status; } +/** ixgbe_disable_rx_x550 - Disable RX unit + * + * Enables the Rx DMA unit for x550 + **/ +static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + 
hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * @@ -1306,8 +1347,8 @@ mac_reset_top: * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ -void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, - int vf) +static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; @@ -1322,6 +1363,33 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } +/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -1356,6 +1424,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .set_source_address_pruning = \ + &ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ @@ -1366,6 +1436,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, .init_thermal_sensor_thresh = NULL, \ .prot_autoc_read = &prot_autoc_read_generic, \ .prot_autoc_write = &prot_autoc_write_generic, \ + .enable_rx = &ixgbe_enable_rx_generic, \ + .disable_rx = &ixgbe_disable_rx_x550, \ static struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 7412d378b77b..770e21a64388 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
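ixgbe_set_source_address_pruning_X550() above keeps one enable bit per pool in a 64-bit bitmap split across the PFFLPL (bits 31:0) and PFFLPH (bits 63:32) registers. A self-contained sketch of that widen/flip/split read-modify-write, with the MMIO accessors stubbed out over a fake_regs array:

        #include <stdint.h>

        static uint32_t fake_regs[2];      /* stands in for device MMIO */
        #define PFFLPL 0
        #define PFFLPH 1
        #define rd32(reg)      (fake_regs[reg])
        #define wr32(reg, val) (fake_regs[reg] = (val))

        static void set_pool_pruning(unsigned int pool, int enable)
        {
                uint64_t bits;

                if (pool > 63)             /* 64 pools maximum */
                        return;

                bits  = (uint64_t)rd32(PFFLPL);
                bits |= (uint64_t)rd32(PFFLPH) << 32;

                if (enable)
                        bits |= 1ULL << pool;
                else
                        bits &= ~(1ULL << pool);

                wr32(PFFLPL, (uint32_t)bits);
                wr32(PFFLPH, (uint32_t)(bits >> 32));
        }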
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,138 +28,138 @@ #define _IXGBEVF_DEFINES_H_ /* Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_VF_IRQ_CLEAR_MASK 7 -#define IXGBE_VF_MAX_TX_QUEUES 8 -#define IXGBE_VF_MAX_RX_QUEUES 8 +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 /* DCB define */ #define IXGBE_VF_MAX_TRAFFIC_CLASS 8 /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 /* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ /* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 
0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ /* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 /* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* 
Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize 
Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ @@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed; IXGBE_RXDADV_ERR_OSE | \ IXGBE_RXDADV_ERR_USE) -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { @@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ 
-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Interrupt register bitmasks */ -#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 #define IXGBE_MAX_EITR 0x00000FF8 #define IXGBE_MIN_EITR 8 /* Error Codes */ -#define IXGBE_ERR_INVALID_MAC_ADDR -1 -#define IXGBE_ERR_RESET_FAILED -2 -#define IXGBE_ERR_INVALID_ARGUMENT -3 +#define IXGBE_ERR_INVALID_MAC_ADDR -1 +#define IXGBE_ERR_RESET_FAILED -2 +#define IXGBE_ERR_INVALID_ARGUMENT -3 /* Transmit Config masks */ #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c 
b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index cc0e5b7ff041..b2f5b161d792 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; + #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static int ixgbevf_get_settings(struct net_device *netdev, @@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev, if (link_up) { __u32 speed = SPEED_10000; + switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed = SPEED_10000; @@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev, static u32 ixgbevf_get_msglevel(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR */ + * read EICS which is a shadow but doesn't clear EICR + */ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); @@ -390,21 +394,21 @@ clear_reset: static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) { - switch (stringset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; - default: - return -EINVAL; - } + switch (stringset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } } static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *base = (char *) adapter; + char *base = (char *)adapter; int i; #ifdef BP_EXTENDED_STATS u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, @@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) } test = reg_test_vf; - /* - * Perform the register test, looping through the test table + /* Perform the register test, looping through the test table * until we either fail or reach the null entry. 
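For context on the self-test loop being reindented here: each table entry names a register block, an instance count, a test type, and a mask of bits expected to hold a written value. The core of the pattern test behind the TABLE32_TEST-style cases looks roughly like the following user-space sketch (rd32()/wr32() stub the MMIO accessors; the four patterns match the ones the ixgbevf test table uses):

        #include <stdint.h>

        static uint32_t fake_reg;          /* one stubbed device register */
        static uint32_t rd32(uint32_t reg) { (void)reg; return fake_reg; }
        static void wr32(uint32_t reg, uint32_t val) { (void)reg; fake_reg = val; }

        static const uint32_t patterns[] = {
                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
        };

        /* Write each pattern, read it back, and compare under 'mask' (the
         * bits the register implements); restore the original value after.
         */
        static int pattern_test(uint32_t reg, uint32_t mask, uint32_t write_mask)
        {
                uint32_t before = rd32(reg);
                unsigned int i;

                for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
                        wr32(reg, patterns[i] & write_mask);
                        if ((rd32(reg) & mask) !=
                            (patterns[i] & write_mask & mask)) {
                                wr32(reg, before);
                                return -1; /* stuck or shorted bits */
                        }
                }

                wr32(reg, before);
                return 0;
        }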
*/ while (test->reg) { @@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) break; case WRITE_NO_TEST: ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), - test->write); + test->reg + (i * 0x40), + test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, @@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev, hw_dbg(&adapter->hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ + * interfere with test result + */ if (ixgbevf_link_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - /* if in mixed tx/rx queues per vector mode, report only rx settings */ + /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; @@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, int num_vectors, i; u16 tx_itr_param, rx_itr_param; - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) + /* don't accept Tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) return -EINVAL; - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; @@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, else rx_itr_param = adapter->rx_itr_setting; - if (ec->tx_coalesce_usecs > 1) adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; else @@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, for (i = 0; i < num_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ + /* Tx only */ q_vector->itr = tx_itr_param; else - /* rx only or mixed */ + /* Rx only or mixed */ q_vector->itr = rx_itr_param; ixgbevf_write_eitr(q_vector); } @@ -792,23 +794,92 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, return 0; } +static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + return 0; + default: + hw_dbg(&adapter->hw, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_82599_RETA_SIZE; + + return 0; +} + +static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_RSS_HASH_KEY_SIZE; + + return 0; +} + +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* If neither indirection table nor 
hash key was requested - just + * return a success avoiding taking any locks. + */ + if (!indir && !key) + return 0; + + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + + return err; +} + static const struct ethtool_ops ixgbevf_ethtool_ops = { - .get_settings = ixgbevf_get_settings, - .get_drvinfo = ixgbevf_get_drvinfo, - .get_regs_len = ixgbevf_get_regs_len, - .get_regs = ixgbevf_get_regs, - .nway_reset = ixgbevf_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = ixgbevf_get_ringparam, - .set_ringparam = ixgbevf_set_ringparam, - .get_msglevel = ixgbevf_get_msglevel, - .set_msglevel = ixgbevf_set_msglevel, - .self_test = ixgbevf_diag_test, - .get_sset_count = ixgbevf_get_sset_count, - .get_strings = ixgbevf_get_strings, - .get_ethtool_stats = ixgbevf_get_ethtool_stats, - .get_coalesce = ixgbevf_get_coalesce, - .set_coalesce = ixgbevf_set_coalesce, + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, + .get_rxnfc = ixgbevf_get_rxnfc, + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 3a9b356dff01..775d08900949 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -51,7 +50,8 @@ #define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ + * so a DMA handle can be stored along with the buffer + */ struct ixgbevf_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; @@ -132,9 +132,10 @@ struct ixgbevf_ring { u8 __iomem *tail; struct sk_buff *skb; - u16 reg_idx; /* holds the special value that gets the hardware register - * offset associated with this ring, which is different - * for DCB and RSS modes */ + /* holds the special value that gets the hardware register offset + * associated with this ring, which is different for DCB and RSS modes + */ + u16 reg_idx; int queue_index; /* needed for multiqueue queue management */ }; @@ -143,21 +144,23 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_RSS_HASH_KEY_SIZE 40 -#define IXGBEVF_DEFAULT_TXD 1024 -#define IXGBEVF_DEFAULT_RXD 512 -#define IXGBEVF_MAX_TXD 4096 -#define IXGBEVF_MIN_TXD 64 -#define IXGBEVF_MAX_RXD 4096 -#define IXGBEVF_MIN_RXD 64 +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 /* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 -#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 -#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -186,10 +189,11 @@ struct ixgbevf_ring_container { */ struct ixgbevf_q_vector { struct ixgbevf_adapter *adapter; - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ + /* index of q_vector within array, also used for finding the bit in + * EICR and friends that represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ struct napi_struct napi; struct ixgbevf_ring_container rx, tx; char name[IFNAMSIZ + 9]; @@ -199,19 +203,21 @@ struct ixgbevf_q_vector { #define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ #define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ #define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) -#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) -#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ + IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ + IXGBEVF_QV_STATE_POLL_YIELD) spinlock_t lock; 
#endif /* CONFIG_NET_RX_BUSY_POLL */ }; + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) { - spin_lock_init(&q_vector->lock); q_vector->state = IXGBEVF_QV_STATE_IDLE; } @@ -220,6 +226,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_LOCKED) { WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); @@ -240,6 +247,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_NAPI_YIELD)); @@ -256,6 +264,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if ((q_vector->state & IXGBEVF_QV_LOCKED)) { q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; @@ -275,6 +284,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); @@ -297,6 +307,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_OWNED) rc = false; @@ -307,8 +318,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #endif /* CONFIG_NET_RX_BUSY_POLL */ -/* - * microsecond values for various ITR rates shifted by 2 to fit itr register +/* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ #define IXGBE_MIN_RSC_ITR 24 @@ -345,22 +355,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) writel(value, ring->tail); } -#define IXGBEVF_RX_DESC(R, i) \ +#define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_DESC(R, i) \ +#define IXGBEVF_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_CTXTDESC(R, i) \ +#define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 2 +#define MAX_MSIX_Q_VECTORS 2 -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* board specific private data structure */ struct ixgbevf_adapter { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 4186981e562d..a16d267fbce4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. 
+ Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,7 +24,6 @@ *******************************************************************************/ - /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code ******************************************************************************/ @@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue - */ + **/ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; + if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { - /* tx or rx causes */ + /* Tx or Rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); @@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum +/** + * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - */ + **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor +/** + * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated @@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, * This function checks the ring, descriptor, and packet information in * order to populate the checksum, VLAN, protocol, and other fields within * the skb. 
- */ + **/ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } } -/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail +/** + * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * @@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, * that allow for significant optimizations versus the standard function. * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. - */ + **/ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { @@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, skb->tail += pull_len; } -/* ixgbevf_cleanup_headers - Correct corrupted or empty headers +/** + * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed @@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. - */ + **/ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, return false; } -/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring +/** + * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter - */ + **/ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff) { @@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; } -/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff +/** + * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @rx_desc: descriptor containing length of buffer written by hardware @@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) * * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. - */ + **/ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, union ixgbe_adv_rx_desc *rx_desc, @@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * source pruning. 
*/ if ((skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) && + skb->pkt_type == PACKET_MULTICAST) && ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); @@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) #endif /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ + * the budget to go below 1 because we'll exit polling + */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else @@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information - */ + **/ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - /* - * set the WDIS bit to not clear the timer bits and cause an + /* set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; @@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; adapter->eims_enable_mask = 0; - /* - * Populate the IVAR table and set the ITR values to the + /* Populate the IVAR table and set the ITR values to the * corresponding register. */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { struct ixgbevf_ring *ring; + q_vector = adapter->q_vector[v_idx]; ixgbevf_for_each_ring(ring, q_vector->rx) @@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { - /* tx only vector */ + /* Tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_10K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { - /* rx or rx/tx vector */ + /* Rx or Rx/Tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else @@ -1167,13 +1172,13 @@ enum latency_range { * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
**/ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring_container *ring_container) @@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, if (packets == 0) return; - /* simple throttlerate management + /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) * 100-1249MB/s bulk (8000 ints/s) @@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* - * The ideal configuration... + /* The ideal configuration... * We have enough vectors to map one per queue. */ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { @@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) goto out; } - /* - * If we don't have enough vectors for a 1-to-1 + /* If we don't have enough vectors for a 1-to-1 * mapping, we'll have to group them so there are * multiple queues per vector. */ @@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) q_vector->name, q_vector); if (err) { hw_dbg(&adapter->hw, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + "request_irq failed for MSIX interrupt Error: %d\n", + err); goto free_queue_irqs; } } @@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { - hw_dbg(&adapter->hw, - "request_irq for msix_other failed: %d\n", err); + hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", + err); goto free_queue_irqs; } @@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) for (i = 0; i < q_vectors; i++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; q_vector->rx.count = 0; @@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) err = ixgbevf_request_msix_irqs(adapter); if (err) - hw_dbg(&adapter->hw, - "request_irq failed, Error %d\n", err); + hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); return err; } @@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); - /* the hardware may take up to 100us to really disable the rx queue */ + /* the hardware may take up to 100us to really disable the Rx queue */ do { udelay(10); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); @@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ + * the Base and Length of the Rx Descriptor Ring + */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); } @@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); udelay(200); } } else { - /* - * If the list is empty then send message to PF driver to - * clear all macvlans on this VF. 
+ /* If the list is empty then send message to PF driver to + * clear all MAC VLANs on this VF. */ hw->mac.ops.set_uc_addr(hw, 0, NULL); } @@ -2026,7 +2030,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_11, + int api[] = { ixgbe_mbox_api_12, + ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; int err = 0, idx = 0; @@ -2184,7 +2189,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) return; /* do nothing if already down */ - /* disable all enabled rx queues */ + /* disable all enabled Rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); @@ -2328,6 +2333,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) switch (hw->api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; default: @@ -2406,8 +2412,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) int err = 0; int vector, v_budget; - /* - * It's easy to be greedy for MSI-X vectors, but it really + /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. @@ -2418,7 +2423,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) v_budget += NON_Q_VECTORS; /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ + * mean we disable MSI-X capabilities of the adapter. + */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { @@ -2544,8 +2550,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_alloc_q_vectors(adapter); if (err) { - hw_dbg(&adapter->hw, "Unable to allocate memory for queue " - "vectors\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } @@ -2555,8 +2560,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) goto err_alloc_queues; } - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); @@ -2600,7 +2604,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) /** * ixgbevf_sw_init - Initialize general software structures - * (struct ixgbevf_adapter) * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. 
@@ -2615,7 +2618,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) int err; /* PCI config space info */ - hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; @@ -2686,8 +2688,8 @@ out: { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ @@ -2758,14 +2760,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) ixgbevf_reinit_locked(adapter); } -/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the device adapter structure +/** + * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. - */ + **/ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -2783,7 +2786,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) set_check_for_tx_hang(adapter->tx_ring[i]); } - /* get one bit for every active tx/rx interrupt vector */ + /* get one bit for every active Tx/Rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; @@ -2797,7 +2800,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) { @@ -2825,7 +2828,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_up - update netif_carrier status and * print link up message - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) { @@ -2850,7 +2853,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_down - update netif_carrier status and * print link down message - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure **/ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) { @@ -2956,7 +2959,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup + * @tx_ring: Tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ @@ -2983,8 +2986,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " - "descriptor ring\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); return 
-ENOMEM; } @@ -3006,8 +3008,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Tx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); break; } @@ -3016,7 +3017,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ @@ -3065,8 +3066,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Rx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); break; } return err; @@ -3136,11 +3136,11 @@ static int ixgbevf_open(struct net_device *netdev) if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and - * the vf can't start. */ + * the VF can't start. + */ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - pr_err("Unable to start - perhaps the PF Driver isn't " - "up yet\n"); + pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); goto err_setup_reset; } } @@ -3163,8 +3163,7 @@ static int ixgbevf_open(struct net_device *netdev) ixgbevf_configure(adapter); - /* - * Map the Tx/Rx rings to the vectors we were allotted. + /* Map the Tx/Rx rings to the vectors we were allotted. * if request_irq will be called in this function map_rings * must be called *before* up_complete */ @@ -3288,6 +3287,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, @@ -3313,7 +3313,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, *hdr_len += l4len; *hdr_len = skb_transport_offset(skb) + l4len; - /* update gso size and bytecount with header size */ + /* update GSO size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; @@ -3343,6 +3343,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + switch (first->protocol) { case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); @@ -3356,8 +3357,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); + "partial checksum but proto=%x!\n", + first->protocol); } break; } @@ -3380,8 +3381,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum but l4 proto=%x!\n", + l4_hdr); } break; } @@ -3405,7 +3406,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - /* set HW vlan bit if vlan is present */ + /* set HW VLAN bit if VLAN is present */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); @@ -3572,11 +3573,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) netif_stop_subqueue(tx_ring->netdev, 
tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ + * but since that doesn't exist yet, just open code it. + */ smp_mb(); /* We need to check again in a case another CPU has just - * made room available. */ + * made room available. + */ if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; @@ -3615,8 +3618,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring = adapter->tx_ring[skb->queue_mapping]; - /* - * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, @@ -3712,6 +3714,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: @@ -3794,8 +3797,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) u32 err; pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call + /* pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. */ pci_save_state(pdev); @@ -3930,8 +3932,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } pci_using_dac = 0; @@ -3962,8 +3963,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - /* - * call save state here in standalone driver because it relies on + /* call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); @@ -3978,7 +3978,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - /* Setup hw api */ + /* Setup HW API */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -3998,11 +3998,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | @@ -4131,7 +4131,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) * * This function is called after a PCI bus error affecting * this device has been detected. - */ + **/ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -4166,7 +4166,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine. 
- */ + **/ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4194,7 +4194,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. - */ + **/ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4214,17 +4214,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { }; static struct pci_driver ixgbevf_driver = { - .name = ixgbevf_driver_name, - .id_table = ixgbevf_pci_tbl, - .probe = ixgbevf_probe, - .remove = ixgbevf_remove, + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = ixgbevf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, #endif - .shutdown = ixgbevf_shutdown, - .err_handler = &ixgbevf_err_handler + .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler }; /** @@ -4236,6 +4236,7 @@ static struct pci_driver ixgbevf_driver = { static int __init ixgbevf_init_module(void) { int ret; + pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); @@ -4266,6 +4267,7 @@ static void __exit ixgbevf_exit_module(void) char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; + return adapter->netdev->name; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index d5028ddf4b31..dc68fea4894b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) } /** - * ixgbevf_poll_for_ack - Wait for message acknowledgement + * ixgbevf_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns 0 if it successfully received a message acknowledgement + * returns 0 if it successfully received a message acknowledgment **/ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) { @@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { + IXGBE_VFMAILBOX_RSTI))) { ret_val = 0; hw->mbx.stats.rsts++; } @@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) ret_val = 0; @@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val; u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; @@ -279,7 +277,7 @@ out_no_write: } /** - * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf + * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val = 0; u16 i; - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; @@ -311,17 +309,18 @@ out_no_read: } /** - * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox + * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ + * value to begin communications + */ mbx->timeout = 0; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; @@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) } const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .read = ixgbevf_read_mbx_vf, - .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, - .check_for_msg = ixgbevf_check_for_msg_vf, - .check_for_ack = ixgbevf_check_for_ack_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 0bc30058ff82..82f44e06e5fc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ 
b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -30,71 +29,70 @@ #include "vf.h" -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 /* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFREQ_VF1 
0x00000001 /* bit for VF 1 message */ #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - * clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 +/* Messages below or'd with this are the ACK */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 +#define IXGBE_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ -/* - * each element denotes a version of the API; existing numbers may not +/* each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; /* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ @@ -105,20 +103,24 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ /* GET_QUEUES return data indices within the mailbox */ -#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ -#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ -#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ -#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ /* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 +#define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in 
permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 +#define IXGBE_VF_MC_TYPE_WORD 3 -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ /* forward declaration of the HW struct */ struct ixgbe_hw; diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 3e712fd6e695..2764fd16261f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,58 +27,58 @@ #ifndef _IXGBEVF_REGS_H_ #define _IXGBEVF_REGS_H_ -#define IXGBE_VFCTRL 0x00000 -#define IXGBE_VFSTATUS 0x00008 -#define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFFRTIMER 0x00048 -#define IXGBE_VFRXMEMWRAP 0x03190 -#define IXGBE_VTEICR 0x00100 -#define IXGBE_VTEICS 0x00104 -#define IXGBE_VTEIMS 0x00108 -#define IXGBE_VTEIMC 0x0010C -#define IXGBE_VTEIAC 0x00110 -#define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) -#define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) -#define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) -#define IXGBE_VFGPRC 0x0101C -#define IXGBE_VFGPTC 0x0201C -#define IXGBE_VFGORC_LSB 0x01020 -#define IXGBE_VFGORC_MSB 0x01024 -#define IXGBE_VFGOTC_LSB 0x02020 -#define IXGBE_VFGOTC_MSB 0x02024 -#define IXGBE_VFMPRC 0x01034 -#define IXGBE_VFMRQC 0x3000 -#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) -#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 
0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) /* VFMRQC bits */ -#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) #endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index cdb53be7d995..d1339b050627 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -65,7 +64,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) * ixgbevf_reset_hw_vf - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts. **/ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) @@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) mdelay(10); - /* set our "perm_addr" based on info provided by PF */ - /* also set up the mc_filter_type which is piggy backed - * on the mac address in word 3 */ + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggybacked + * on the mac address in word 3 + */ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); if (ret_val) return ret_val; @@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - memcpy(hw->mac.perm_addr, addr, ETH_ALEN); + ether_addr_copy(hw->mac.perm_addr, addr); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; @@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) u32 reg_val; u16 i; - /* - * Set the adapter_stopped flag so other driver functions stop touching + /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; @@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) * * Extracts the 12 bits, from a multicast address, to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in + * incoming Rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. @@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { - memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); + ether_addr_copy(mac_addr, hw->mac.perm_addr); return 0; } @@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); - /* - * If index is one then this is the start of a new list and needs + /* If index is one then this is the start of a new list and needs * indication to the PF so it can do it's own list management. * If it is zero then that tells the PF to just clear all of * this VF's macvlans and there is no new list. @@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; if (addr) - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -259,6 +257,129 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) }
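The helpers above, and the two RSS query routines added below, all share one posted-mailbox shape: write a command word, read the posted reply, clear the CTS bit, then test for ACK or NACK. A distilled sketch of that round trip, using only message-type constants visible in this patch (a hypothetical helper, not a function in the driver):

static s32 vf_pf_roundtrip(struct ixgbe_hw *hw, u32 *msgbuf, u16 len, u32 cmd)
{
	s32 err;

	msgbuf[0] = cmd;
	err = hw->mbx.ops.write_posted(hw, msgbuf, len);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, len);
	if (err)
		return err;

	/* the PF echoes the command word back with CTS/ACK/NACK flags */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (cmd | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;		/* PF refused the request */
	if (msgbuf[0] != (cmd | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;	/* some other mailbox failure */

	return 0;
}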
/** + * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. + * @hw: pointer to the HW structure + * @reta: buffer to fill with RETA contents. + * @num_rx_queues: Number of Rx queues configured for this port + * + * The "reta" buffer should be big enough to contain 32 registers. + * + * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API doesn't + * support this operation. + */ +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) +{ + int err, i, j; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u32 *hw_reta = &msgbuf[1]; + u32 mask = 0; + + /* We have to use a mailbox for 82599 and x540 devices only. + * For these devices RETA has 128 entries. + * Also these VFs support up to 4 RSS queues. Therefore PF will compress + * 16 RETA entries in each DWORD giving 2 bits to each entry. + */ + int dwords = IXGBEVF_82599_RETA_SIZE / 16; + + /* We support the RSS querying for 82599 and x540 devices only. + * Thus return an error if the API doesn't support RETA querying or if + * querying is not supported for this device type. + */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RETA; + + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* ixgbevf doesn't support more than 2 queues at the moment */ + if (num_rx_queues > 1) + mask = 0x1; + + for (i = 0; i < dwords; i++) + for (j = 0; j < 16; j++) + reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; + + return 0; +}
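To make the packing scheme described in the comment above concrete: the PF returns the 128-entry RETA as 128 / 16 = 8 DWORDs, each DWORD carrying 16 entries of 2 bits. A self-contained sketch of the unpack step (illustrative only, mirroring the nested loop above):

#include <stdint.h>

/* expand one packed DWORD into 16 RETA entries, low bits first */
static void unpack_reta_dword(uint32_t dword, uint32_t mask,
			      uint32_t *reta /* 16 entries */)
{
	int j;

	for (j = 0; j < 16; j++)
		reta[j] = (dword >> (2 * j)) & mask;
}

/* With mask = 0x1 (i.e. two RX queues), the DWORD 0x00000004 yields
 * reta[1] = 1 and every other entry 0. */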
+ +/** + * ixgbevf_get_rss_key_locked - get the RSS Random Key + * @hw: pointer to the HW structure + * @rss_key: buffer to fill with RSS Hash Key contents. + * + * The "rss_key" buffer should be big enough to contain 10 registers. + * + * Returns: 0 on success, or -EOPNOTSUPP if the mailbox API doesn't + * support this operation. + */ +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) +{ + int err; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + + /* We currently support the RSS Random Key retrieval for 82599 and x540 + * devices only. + * + * Thus return an error if API doesn't support RSS Random Key retrieval + * or if the operation is not supported for this device type. + */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RSS_KEY; + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, 11); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); + + return 0; +} + +/** * ixgbevf_set_rar_vf - set device MAC address * @hw: pointer to hardware structure * @index: Receive address register to write @@ -275,7 +396,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -292,7 +413,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, } static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, - u32 *msg, u16 size) + u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 retmsg[IXGBE_VFMAILBOX_SIZE]; @@ -348,7 +469,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** - * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address + * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers @@ -462,7 +583,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; @@ -480,7 +602,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: @@ -545,6 +668,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, /* do nothing if API doesn't support ixgbevf_get_queues */ switch (hw->api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: break; default: return 0; @@ -561,8 +685,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; - /* - * if we we didn't get an ACK there must have been + /* if we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such */ @@ -595,17 +718,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, } static const struct ixgbe_mac_operations ixgbevf_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_check_mac_link_vf, - .set_rar = ixgbevf_set_rar_vf, - .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, - .set_uc_addr = ixgbevf_set_uc_addr_vf, - .set_vfta = ixgbevf_set_vfta_vf, + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .set_uc_addr = ixgbevf_set_uc_addr_vf, + .set_vfta = ixgbevf_set_vfta_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 5b172427f459..d40f036b6df0 100644 ---
a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -169,7 +168,7 @@ struct ixgbevf_hw_stats { }; struct ixgbevf_info { - enum ixgbe_mac_type mac; + enum ixgbe_mac_type mac; const struct ixgbe_mac_operations *mac_ops; }; @@ -185,28 +184,32 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) return; writel(value, reg_addr + reg); } + #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset, u32 value) + u32 offset, u32 value) { ixgbe_write_reg(hw, reg + (offset << 2), value); } + #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset) + u32 offset) { return ixgbevf_read_reg(hw, reg + (offset << 2)); } + #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key); #endif /* __IXGBE_VF_H__ */ - diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 96208f17bb53..ce5f7f9cff06 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -100,6 +100,8 @@ #define MVNETA_TXQ_CMD 0x2448 #define MVNETA_TXQ_DISABLE_SHIFT 8 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff +#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 +#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) #define MVNETA_ACC_MODE 0x2500 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff @@ -122,6 +124,7 @@ #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) +#define MVNETA_MISCINTR_INTR_MASK BIT(31) #define MVNETA_INTR_OLD_CAUSE 0x25a8 #define MVNETA_INTR_OLD_MASK 0x25ac @@ -165,6 +168,7 @@ #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 +#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) #define MVNETA_GMAC2_PCS_ENABLE BIT(3) #define MVNETA_GMAC2_PORT_RGMII BIT(4) #define MVNETA_GMAC2_PORT_RESET BIT(6) @@ -180,9 +184,11 @@ #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) +#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) #define 
MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 @@ -304,6 +310,7 @@ struct mvneta_port { unsigned int link; unsigned int duplex; unsigned int speed; + int use_inband_status:1; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -994,6 +1001,20 @@ static void mvneta_defaults_set(struct mvneta_port *pp) val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + if (pp->use_inband_status) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_AN_FLOW_CTRL_EN); + val |= MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + } + mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2043,6 +2064,28 @@ static irqreturn_t mvneta_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int mvneta_fixed_link_update(struct mvneta_port *pp, + struct phy_device *phy) +{ + struct fixed_phy_status status; + struct fixed_phy_status changed = {}; + u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); + + status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); + if (gmac_stat & MVNETA_GMAC_SPEED_1000) + status.speed = SPEED_1000; + else if (gmac_stat & MVNETA_GMAC_SPEED_100) + status.speed = SPEED_100; + else + status.speed = SPEED_10; + status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); + changed.link = 1; + changed.speed = 1; + changed.duplex = 1; + fixed_phy_update_state(phy, &status, &changed); + return 0; +} + /* NAPI handler * Bits 0 - 7 of the causeRxTx register indicate that are transmitted * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 
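For the interrupt plumbing in the hunks that follow, note how the mask macros near the top of this file compose: MVNETA_RX_INTR_MASK(nr_rxqs) is a contiguous run of one bit per RX queue starting at bit 8, and the patch now ORs in MVNETA_MISCINTR_INTR_MASK (bit 31) so that PHY/link events reach the same ISR. A tiny sketch (illustrative only, not part of the patch):

#include <stdio.h>

#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << (nr_rxqs)) - 1) << 8)
#define MVNETA_MISCINTR_INTR_MASK	(1u << 31)

int main(void)
{
	/* 8 RX queues occupy bits 8..15, plus the misc summary bit 31 */
	printf("0x%08x\n",
	       MVNETA_RX_INTR_MASK(8) | MVNETA_MISCINTR_INTR_MASK);
	/* prints 0x8000ff00 */
	return 0;
}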
@@ -2063,8 +2106,18 @@ static int mvneta_poll(struct napi_struct *napi, int budget) } /* Read cause register */ - cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & - (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); + cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); + if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { + u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); + + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + if (pp->use_inband_status && (cause_misc & + (MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE))) { + mvneta_fixed_link_update(pp, pp->phy_dev); + } + } /* Release Tx descriptors */ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { @@ -2109,7 +2162,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget) napi_complete(napi); local_irq_save(flags); mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | + MVNETA_MISCINTR_INTR_MASK); local_irq_restore(flags); } @@ -2373,7 +2428,13 @@ static void mvneta_start_dev(struct mvneta_port *pp) /* Unmask interrupts */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | + MVNETA_MISCINTR_INTR_MASK); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE); phy_start(pp->phy_dev); netif_tx_start_all_queues(pp->dev); @@ -2523,9 +2584,7 @@ static void mvneta_adjust_link(struct net_device *ndev) val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN); + MVNETA_GMAC_CONFIG_FULL_DUPLEX); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; @@ -2554,12 +2613,24 @@ static void mvneta_adjust_link(struct net_device *ndev) if (status_change) { if (phydev->link) { - u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val |= (MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_FORCE_LINK_DOWN); - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + if (!pp->use_inband_status) { + u32 val = mvreg_read(pp, + MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + val |= MVNETA_GMAC_FORCE_LINK_PASS; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + val); + } mvneta_port_up(pp); } else { + if (!pp->use_inband_status) { + u32 val = mvreg_read(pp, + MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + val); + } mvneta_port_down(pp); } phy_print_status(phydev); @@ -2658,16 +2729,11 @@ static int mvneta_stop(struct net_device *dev) static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvneta_port *pp = netdev_priv(dev); - int ret; if (!pp->phy_dev) return -ENOTSUPP; - ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); - if (!ret) - mvneta_adjust_link(dev); - - return ret; + return phy_mii_ioctl(pp->phy_dev, ifr, cmd); } /* Ethtool methods */ @@ -2910,6 +2976,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) return -EINVAL; } + if (pp->use_inband_status) + ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; + /* Cancel Port Reset */ ctrl &= ~MVNETA_GMAC2_PORT_RESET; mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); @@ -2934,6 +3003,7 @@ static int 
mvneta_probe(struct platform_device *pdev) char hw_mac_addr[ETH_ALEN]; const char *mac_from; int phy_mode; + int fixed_phy = 0; int err; /* Our multiqueue support is not complete, so for now, only @@ -2967,6 +3037,7 @@ static int mvneta_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot register fixed PHY\n"); goto err_free_irq; } + fixed_phy = 1; /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. @@ -2990,6 +3061,8 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); pp->phy_node = phy_node; pp->phy_interface = phy_mode; + pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && + fixed_phy; pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { @@ -3067,6 +3140,12 @@ static int mvneta_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pp->dev); + if (pp->use_inband_status) { + struct phy_device *phy = of_phy_find_device(dn); + + mvneta_fixed_link_update(pp, phy); + } + return 0; err_free_stats: diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index fdf3e382e464..3e8b1bfb1f2e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1423,7 +1423,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) { struct mvpp2_prs_entry pe; - /* Promiscous mode - Accept unknown packets */ + /* Promiscuous mode - Accept unknown packets */ if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { /* Entry exist - update port only */ @@ -3402,7 +3402,7 @@ static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool for (i = 0; i < bm_pool->buf_num; i++) { u32 vaddr; - /* Get buffer virtual adress (indirect access) */ + /* Get buffer virtual address (indirect access) */ mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); if (!vaddr) diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 3e9c70f15b42..c82217e0d22d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -1,7 +1,8 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o -mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ - mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o +mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \ + main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \ + srq.o resource_tracker.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a681d7c0bb9f..4f7dc044601e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -48,6 +48,7 @@ #include "mlx4.h" #include "fw.h" +#include "fw_qos.h" #define CMD_POLL_TOKEN 0xffff #define INBOX_MASK 0xffffffffffffff00ULL @@ -724,8 +725,10 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, * on the host, we deprecate the error message for this * specific command/input_mod/opcode_mod/fw-status to be debug. 
*/ - if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && - op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) + if (op == MLX4_CMD_SET_PORT && + (in_modifier == 1 || in_modifier == 2) && + op_modifier == MLX4_SET_PORT_IB_OPCODE && + context->fw_status == CMD_STAT_BAD_SIZE) mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", op, context->fw_status); else @@ -936,21 +939,34 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, return err; } if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { - /* compute slave's gid block */ - smp->attr_mod = cpu_to_be32(slave / 8); - /* execute cmd */ - err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, - vhcr->in_modifier, opcode_modifier, - vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); - if (!err) { - /* if needed, move slave gid to index 0 */ - if (slave % 8) - memcpy(outsmp->data, - outsmp->data + (slave % 8) * 8, 8); - /* delete all other gids */ - memset(outsmp->data + 8, 0, 56); + __be64 guid = mlx4_get_admin_guid(dev, slave, + port); + + /* set the PF admin guid to the FW/HW burned + * GUID, if it wasn't yet set + */ + if (slave == 0 && guid == 0) { + smp->attr_mod = 0; + err = mlx4_cmd_box(dev, + inbox->dma, + outbox->dma, + vhcr->in_modifier, + opcode_modifier, + vhcr->op, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + mlx4_set_admin_guid(dev, + *(__be64 *)outsmp-> + data, slave, port); + } else { + memcpy(outsmp->data, &guid, 8); } - return err; + + /* clean all other gids */ + memset(outsmp->data + 8, 0, 56); + return 0; } if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, @@ -1455,6 +1471,24 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = mlx4_CMD_EPERM_wrapper, }, { + .opcode = MLX4_CMD_ALLOCATE_VPP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_SET_VPORT_QOS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false, @@ -1499,6 +1533,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_ACCESS_REG_wrapper, }, + { + .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, @@ -1781,7 +1824,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, if (vp_oper->state.default_vlan == vp_admin->default_vlan && vp_oper->state.default_qos == vp_admin->default_qos && - vp_oper->state.link_state == vp_admin->link_state) + vp_oper->state.link_state == vp_admin->link_state && + vp_oper->state.qos_vport == vp_admin->qos_vport) return 0; if (!(priv->mfunc.master.slave_state[slave].active && @@ -1839,6 +1883,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, vp_oper->state.default_vlan = vp_admin->default_vlan; vp_oper->state.default_qos = vp_admin->default_qos; vp_oper->state.link_state = vp_admin->link_state; + vp_oper->state.qos_vport = vp_admin->qos_vport; if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; @@ -1847,6 +1892,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct 
mlx4_priv *priv, work->port = port; work->slave = slave; work->qos = vp_oper->state.default_qos; + work->qos_vport = vp_oper->state.qos_vport; work->vlan_id = vp_oper->state.default_vlan; work->vlan_ix = vp_oper->vlan_idx; work->priv = priv; @@ -1856,6 +1902,63 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, return 0; } +static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port) +{ + struct mlx4_qos_manager *port_qos_ctl; + struct mlx4_priv *priv = mlx4_priv(dev); + + port_qos_ctl = &priv->mfunc.master.qos_ctl[port]; + bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP); + + /* Enable only default prio at PF init routine */ + set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm); +} + +static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) +{ + int i; + int err; + int num_vfs; + u16 available_vpp; + u8 vpp_param[MLX4_NUM_UP]; + struct mlx4_qos_manager *port_qos; + struct mlx4_priv *priv = mlx4_priv(dev); + + err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed to query available VPPs\n"); + return; + } + + port_qos = &priv->mfunc.master.qos_ctl[port]; + num_vfs = (available_vpp / + bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm)) + vpp_param[i] = num_vfs; + } + + err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param); + if (err) { + mlx4_info(dev, "Failed allocating VPPs\n"); + return; + } + + /* Query the actually allocated VPPs, just to make sure */ + err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed to query available VPPs\n"); + return; + } + + port_qos->num_of_qos_vfs = num_vfs; + mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, + vpp_param[i]); +}
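The arithmetic in mlx4_allocate_port_vpps() above splits the firmware's VPP budget evenly across the priorities enabled in the port bitmap, and each enabled priority is then given that many per-VF slots. A worked sketch with made-up numbers (the firmware budget here is purely hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int available_vpp = 126;  /* hypothetical value from FW */
	unsigned int enabled_prios = 2;    /* bitmap_weight() of priority_bm */
	unsigned int num_vfs = available_vpp / enabled_prios;

	/* each enabled priority is allotted num_vfs VPPs: 63 apiece */
	printf("%u VPPs per enabled priority\n", num_vfs);
	return 0;
}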
err_slaves; } + + admin_vport = &vf_admin->vport[port]; + oper_vport = &vf_oper->vport[port].state; INIT_LIST_HEAD(&s_state->mcast_filters[port]); - priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT; - priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT; - priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX; - priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX; + admin_vport->default_vlan = MLX4_VGT; + oper_vport->default_vlan = MLX4_VGT; + admin_vport->qos_vport = + MLX4_VPP_DEFAULT_VPORT; + oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT; + vf_oper->vport[port].vlan_idx = NO_INDX; + vf_oper->vport[port].mac_idx = NO_INDX; + mlx4_set_random_admin_guid(dev, i, port); } spin_lock_init(&s_state->lock); } + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) { + for (port = 1; port <= dev->caps.num_ports; port++) { + if (mlx4_is_eth(dev, port)) { + mlx4_set_default_port_qos(dev, port); + mlx4_allocate_port_vpps(dev, port); + } + } + } + memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; INIT_WORK(&priv->mfunc.master.comm_work, @@ -2670,6 +2797,103 @@ static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) return port; } +static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port, + int max_tx_rate) +{ + int i; + int err; + struct mlx4_qos_manager *port_qos; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP]; + + port_qos = &priv->mfunc.master.qos_ctl[port]; + memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP); + + if (slave > port_qos->num_of_qos_vfs) { + mlx4_info(dev, "No available VPP resources for this VF\n"); + return -EINVAL; + } + + /* Querying the default QoS values from Vport 0 is needed first */ + err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to query Vport 0 QoS values\n"); + return err; + } + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm) && max_tx_rate) { + vpp_qos[i].max_avg_bw = max_tx_rate; + vpp_qos[i].enable = 1; + } else { + /* A user-supplied tx_rate of 0 means no rate limit + * configuration is required, so we leave the + * value of max_avg_bw as queried from Vport 0. + */ + vpp_qos[i].enable = 0; + } + } + + err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave); + return err; + } + + return 0; +} + +static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin) +{ + struct mlx4_qos_manager *info; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return false; + + info = &priv->mfunc.master.qos_ctl[port]; + + if (vf_admin->default_vlan != MLX4_VGT && + test_bit(vf_admin->default_qos, info->priority_bm)) + return true; + + return false; +}
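mlx4_is_vf_vst_and_prio_qos() above is the gate the rest of the QoS changes lean on: a VF rate limit only binds when the VF sits in VST mode (a fixed default VLAN rather than the MLX4_VGT sentinel) and its default QoS priority is enabled in the port bitmap. The same predicate in isolation, as a sketch (the 4095 sentinel is an assumption standing in for MLX4_VGT):

#include <stdbool.h>

#define EX_VGT	4095	/* assumed stand-in for the MLX4_VGT sentinel */

static bool vst_with_qos_prio(int default_vlan, int default_qos,
			      unsigned long enabled_prio_bitmap)
{
	/* VST (fixed VLAN) plus a priority the port actually schedules */
	return default_vlan != EX_VGT &&
	       (enabled_prio_bitmap & (1UL << default_qos));
}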
+static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin, + int vlan, int qos) +{ + struct mlx4_vport_state dummy_admin = {0}; + + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) || + !vf_admin->tx_rate) + return true; + + dummy_admin.default_qos = qos; + dummy_admin.default_vlan = vlan; + + /* VF wants to move to another VST state which is valid with the + * current rate limit: either a different default vlan in VST or + * another supported QoS priority. Otherwise we don't allow this + * change while the TX rate is still configured. + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin)) + return true; + + mlx4_info(dev, "Cannot change VF state to %s while rate is set\n", + (vlan == MLX4_VGT) ? "VGT" : "VST"); + + if (vlan != MLX4_VGT) + mlx4_info(dev, "VST priority %d not supported for QoS\n", qos); + + mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n"); + + return false; +} + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2713,12 +2937,22 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) port = mlx4_slaves_closest_port(dev, slave, port); vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos)) + return -EPERM; + if ((0 == vlan) && (0 == qos)) vf_admin->default_vlan = MLX4_VGT; else vf_admin->default_vlan = vlan; vf_admin->default_qos = qos; + /* If a rate was configured prior to VST, we saved the configured rate + * in vf_admin->tx_rate; now, if the priority is supported, we enforce the QoS + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) && + vf_admin->tx_rate) + vf_admin->qos_vport = slave; + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) mlx4_info(dev, "updating vf %d port %d config will take effect on next VF restart\n", @@ -2727,6 +2961,69 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); +int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate) +{ + int err; + int slave; + struct mlx4_vport_state *vf_admin; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return -EPROTONOSUPPORT; + + if (min_tx_rate) { + mlx4_info(dev, "Minimum BW share not supported\n"); + return -EPROTONOSUPPORT; + } + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate); + if (err) { + mlx4_info(dev, "vf %d failed to set rate %d\n", vf, + max_tx_rate); + return err; + } + + vf_admin->tx_rate = max_tx_rate; + /* if the VF is not in a supported mode (VST with supported prio), + * we do not change vport configuration for its QPs, but save + * the rate, so it will be enforced when it moves to a supported + * mode next time. + */ + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) { + mlx4_info(dev, + "rate set for VF %d when not in valid state\n", vf); + + if (vf_admin->default_vlan != MLX4_VGT) + mlx4_info(dev, "VST priority not supported by QoS\n"); + else + mlx4_info(dev, "VF in VGT mode (needed VST)\n"); + + mlx4_info(dev, + "rate %d takes effect when VF moves to valid state\n", + max_tx_rate); + return 0; + } + + /* If the user sets rate 0, assign the default vport to its QPs */ + vf_admin->qos_vport = max_tx_rate ?
slave : MLX4_VPP_DEFAULT_VPORT; + + if (priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) + mlx4_master_immediate_activate_vlan_qos(priv, slave, port); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_rate); + /* mlx4_get_slave_default_vlan - * return true if VST ( default vlan) * if VST, will return vlan & qos (if not NULL) @@ -2800,7 +3097,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in ivf->vlan = s_info->default_vlan; ivf->qos = s_info->default_qos; - ivf->max_tx_rate = s_info->tx_rate; + + if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info)) + ivf->max_tx_rate = s_info->tx_rate; + else + ivf->max_tx_rate = 0; + ivf->min_tx_rate = 0; ivf->spoofchk = s_info->spoofchk; ivf->linkstate = s_info->link_state; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 90b5309cdb5c..8a083d73efdb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -164,20 +164,19 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. **/ -static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) { struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, ptp_clock_info); unsigned long flags; - u32 remainder; u64 ns; write_lock_irqsave(&mdev->clock_lock, flags); ns = timecounter_read(&mdev->clock); write_unlock_irqrestore(&mdev->clock_lock, flags); - ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -191,11 +190,11 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * wall timer value. 
**/ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, ptp_clock_info); - u64 ns = timespec_to_ns(ts); + u64 ns = timespec64_to_ns(ts); unsigned long flags; /* reset the timecounter */ @@ -232,8 +231,8 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .pps = 0, .adjfreq = mlx4_en_phc_adjfreq, .adjtime = mlx4_en_phc_adjtime, - .gettime = mlx4_en_phc_gettime, - .settime = mlx4_en_phc_settime, + .gettime64 = mlx4_en_phc_gettime, + .settime64 = mlx4_en_phc_settime, .enable = mlx4_en_phc_enable, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index c95ca252187c..f01918c63f28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -35,6 +35,50 @@ #include <linux/math64.h> #include "mlx4_en.h" +#include "fw_qos.h" + +/* Definitions for QCN + */ + +struct mlx4_congestion_control_mb_prio_802_1_qau_params { + __be32 modify_enable_high; + __be32 modify_enable_low; + __be32 reserved1; + __be32 extended_enable; + __be32 rppp_max_rps; + __be32 rpg_time_reset; + __be32 rpg_byte_reset; + __be32 rpg_threshold; + __be32 rpg_max_rate; + __be32 rpg_ai_rate; + __be32 rpg_hai_rate; + __be32 rpg_gd; + __be32 rpg_min_dec_fac; + __be32 rpg_min_rate; + __be32 max_time_rise; + __be32 max_byte_rise; + __be32 max_qdelta; + __be32 min_qoffset; + __be32 gd_coefficient; + __be32 reserved2[5]; + __be32 cp_sample_base; + __be32 reserved3[39]; +}; + +struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { + __be64 rppp_rp_centiseconds; + __be32 reserved1; + __be32 ignored_cnm; + __be32 rppp_created_rps; + __be32 estimated_total_rate; + __be32 max_active_rate_limiter_index; + __be32 dropped_cnms_busy_fw; + __be32 reserved2; + __be32 cnms_handled_successfully; + __be32 min_total_limiters_rate; + __be32 max_total_limiters_rate; + __be32 reserved3[4]; +}; static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) @@ -183,6 +227,10 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, prof->rx_ppp); if (err) en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + prof->rx_ppp, prof->rx_pause, + prof->tx_ppp, prof->tx_pause); return err; } @@ -242,6 +290,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, return 0; } +#define RPG_ENABLE_BIT 31 +#define CN_TAG_BIT 30 + +static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, + inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, 
mailbox_out); + return err; + } + + qcn->rpg_enable[i] = + be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT; + qcn->rppp_max_rps[i] = + be32_to_cpu(hw_qcn->rppp_max_rps); + qcn->rpg_time_reset[i] = + be32_to_cpu(hw_qcn->rpg_time_reset); + qcn->rpg_byte_reset[i] = + be32_to_cpu(hw_qcn->rpg_byte_reset); + qcn->rpg_threshold[i] = + be32_to_cpu(hw_qcn->rpg_threshold); + qcn->rpg_max_rate[i] = + be32_to_cpu(hw_qcn->rpg_max_rate); + qcn->rpg_ai_rate[i] = + be32_to_cpu(hw_qcn->rpg_ai_rate); + qcn->rpg_hai_rate[i] = + be32_to_cpu(hw_qcn->rpg_hai_rate); + qcn->rpg_gd[i] = + be32_to_cpu(hw_qcn->rpg_gd); + qcn->rpg_min_dec_fac[i] = + be32_to_cpu(hw_qcn->rpg_min_dec_fac); + qcn->rpg_min_rate[i] = + be32_to_cpu(hw_qcn->rpg_min_rate); + qcn->cndd_state_machine[i] = + priv->cndd_state[i]; + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_in = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; +#define MODIFY_ENABLE_HIGH_MASK 0xc0000000 +#define MODIFY_ENABLE_LOW_MASK 0xffc00000 + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_in)) + return -ENOMEM; + + mailbox_in_dma = mailbox_in->dma; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + + /* Before updating a QCN parameter, + * need to set its modify enable bit to 1 + */ + + hw_qcn->modify_enable_high = cpu_to_be32( + MODIFY_ENABLE_HIGH_MASK); + hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK); + + hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT); + hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]); + hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]); + hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]); + hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]); + hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]); + hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]); + hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]); + hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]); + hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]); + hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]); + priv->cndd_state[i] = qcn->cndd_state_machine[i]; + if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY) + hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT); + + err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod, + MLX4_CONGESTION_CONTROL_SET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return err; + } + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return 0; +} + +static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, + struct ieee_qcn_stats *qcn_stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return
-EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + + hw_qcn_stats = + (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, inmod, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + qcn_stats->rppp_rp_centiseconds[i] = + be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); + qcn_stats->rppp_created_rps[i] = + be32_to_cpu(hw_qcn_stats->rppp_created_rps); + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { .ieee_getets = mlx4_en_dcbnl_ieee_getets, .ieee_setets = mlx4_en_dcbnl_ieee_setets, @@ -252,6 +472,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, }; const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a7b58ba8492b..3f44e2bbb982 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -38,6 +38,7 @@ #include <linux/mlx4/device.h> #include <linux/in.h> #include <net/ip.h> +#include <linux/bitmap.h> #include "mlx4_en.h" #include "en_port.h" @@ -104,6 +105,7 @@ static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { }; static const char main_strings[][ETH_GSTRING_LEN] = { + /* main statistics */ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", "rx_length_errors", "rx_over_errors", "rx_crc_errors", @@ -117,14 +119,76 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", + /* priority flow control statistics rx */ + "rx_pause_prio_0", "rx_pause_duration_prio_0", + "rx_pause_transition_prio_0", + "rx_pause_prio_1", "rx_pause_duration_prio_1", + "rx_pause_transition_prio_1", + "rx_pause_prio_2", "rx_pause_duration_prio_2", + "rx_pause_transition_prio_2", + "rx_pause_prio_3", "rx_pause_duration_prio_3", + "rx_pause_transition_prio_3", + "rx_pause_prio_4", "rx_pause_duration_prio_4", + "rx_pause_transition_prio_4", + "rx_pause_prio_5", "rx_pause_duration_prio_5", + "rx_pause_transition_prio_5", + "rx_pause_prio_6", "rx_pause_duration_prio_6", + "rx_pause_transition_prio_6", + "rx_pause_prio_7", "rx_pause_duration_prio_7", + "rx_pause_transition_prio_7", + + /* flow control statistics rx */ + "rx_pause", "rx_pause_duration", "rx_pause_transition", + + /* priority flow control statistics tx */ + "tx_pause_prio_0", "tx_pause_duration_prio_0", + "tx_pause_transition_prio_0", + "tx_pause_prio_1", "tx_pause_duration_prio_1", + "tx_pause_transition_prio_1", + "tx_pause_prio_2", "tx_pause_duration_prio_2", + "tx_pause_transition_prio_2", + "tx_pause_prio_3", "tx_pause_duration_prio_3", + "tx_pause_transition_prio_3", + "tx_pause_prio_4", 
"tx_pause_duration_prio_4", + "tx_pause_transition_prio_4", + "tx_pause_prio_5", "tx_pause_duration_prio_5", + "tx_pause_transition_prio_5", + "tx_pause_prio_6", "tx_pause_duration_prio_6", + "tx_pause_transition_prio_6", + "tx_pause_prio_7", "tx_pause_duration_prio_7", + "tx_pause_transition_prio_7", + + /* flow control statistics tx */ + "tx_pause", "tx_pause_duration", "tx_pause_transition", + /* packet statistics */ - "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", - "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0", - "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5", - "tx_prio_6", "tx_prio_7", + "rx_multicast_packets", + "rx_broadcast_packets", + "rx_jabbers", + "rx_in_range_length_error", + "rx_out_range_length_error", + "tx_multicast_packets", + "tx_broadcast_packets", + "rx_prio_0_packets", "rx_prio_0_bytes", + "rx_prio_1_packets", "rx_prio_1_bytes", + "rx_prio_2_packets", "rx_prio_2_bytes", + "rx_prio_3_packets", "rx_prio_3_bytes", + "rx_prio_4_packets", "rx_prio_4_bytes", + "rx_prio_5_packets", "rx_prio_5_bytes", + "rx_prio_6_packets", "rx_prio_6_bytes", + "rx_prio_7_packets", "rx_prio_7_bytes", + "rx_novlan_packets", "rx_novlan_bytes", + "tx_prio_0_packets", "tx_prio_0_bytes", + "tx_prio_1_packets", "tx_prio_1_bytes", + "tx_prio_2_packets", "tx_prio_2_bytes", + "tx_prio_3_packets", "tx_prio_3_bytes", + "tx_prio_4_packets", "tx_prio_4_bytes", + "tx_prio_5_packets", "tx_prio_5_bytes", + "tx_prio_6_packets", "tx_prio_6_bytes", + "tx_prio_7_packets", "tx_prio_7_bytes", + "tx_novlan_packets", "tx_novlan_bytes", + }; -#define NUM_MAIN_STATS 21 -#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { "Interrupt Test", @@ -224,14 +288,50 @@ static int mlx4_en_set_wol(struct net_device *netdev, return err; } +struct bitmap_iterator { + unsigned long *stats_bitmap; + unsigned int count; + unsigned int iterator; + bool advance_array; /* if set, force no increments */ +}; + +static inline void bitmap_iterator_init(struct bitmap_iterator *h, + unsigned long *stats_bitmap, + int count) +{ + h->iterator = 0; + h->advance_array = !bitmap_empty(stats_bitmap, count); + h->count = h->advance_array ? bitmap_weight(stats_bitmap, count) + : count; + h->stats_bitmap = stats_bitmap; +} + +static inline int bitmap_iterator_test(struct bitmap_iterator *h) +{ + return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap); +} + +static inline int bitmap_iterator_inc(struct bitmap_iterator *h) +{ + return h->iterator++; +} + +static inline unsigned int +bitmap_iterator_count(struct bitmap_iterator *h) +{ + return h->count; +} + static int mlx4_en_get_sset_count(struct net_device *dev, int sset) { struct mlx4_en_priv *priv = netdev_priv(dev); - int bit_count = hweight64(priv->stats_bitmap); + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); switch (sset) { case ETH_SS_STATS: - return (priv->stats_bitmap ? 
bit_count : NUM_ALL_STATS) + + return bitmap_iterator_count(&it) + (priv->tx_ring_num * 2) + #ifdef CONFIG_NET_RX_BUSY_POLL (priv->rx_ring_num * 5); @@ -253,34 +353,45 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); int index = 0; - int i, j = 0; + int i; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); spin_lock_bh(&priv->stats_lock); - if (!(priv->stats_bitmap)) { - for (i = 0; i < NUM_MAIN_STATS; i++) - data[index++] = - ((unsigned long *) &priv->stats)[i]; - for (i = 0; i < NUM_PORT_STATS; i++) + for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->stats)[i]; + + for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->port_stats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) data[index++] = - ((unsigned long *) &priv->port_stats)[i]; - for (i = 0; i < NUM_PKT_STATS; i++) + ((u64 *)&priv->rx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->rx_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) data[index++] = - ((unsigned long *) &priv->pkstats)[i]; - } else { - for (i = 0; i < NUM_MAIN_STATS; i++) { - if ((priv->stats_bitmap >> j) & 1) - data[index++] = - ((unsigned long *) &priv->stats)[i]; - j++; - } - for (i = 0; i < NUM_PORT_STATS; i++) { - if ((priv->stats_bitmap >> j) & 1) - data[index++] = - ((unsigned long *) &priv->port_stats)[i]; - j++; - } - } + ((u64 *)&priv->tx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->tx_flowstats)[i]; + + for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->pkstats)[i]; + for (i = 0; i < priv->tx_ring_num; i++) { data[index++] = priv->tx_ring[i]->packets; data[index++] = priv->tx_ring[i]->bytes; @@ -309,7 +420,10 @@ static void mlx4_en_get_strings(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); int index = 0; - int i; + int i, strings = 0; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); switch (stringset) { case ETH_SS_TEST: @@ -322,29 +436,30 @@ static void mlx4_en_get_strings(struct net_device *dev, case ETH_SS_STATS: /* Add main counters */ - if (!priv->stats_bitmap) { - for (i = 0; i < NUM_MAIN_STATS; i++) + for (i = 0; i < NUM_MAIN_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_PORT_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[i]); - for (i = 0; i < NUM_PORT_STATS; i++) + main_strings[strings]); + + for (i = 0; i < NUM_FLOW_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[i + - NUM_MAIN_STATS]); - for (i = 0; i < NUM_PKT_STATS; i++) + main_strings[strings]); + + for (i = 0; i < NUM_PKT_STATS; i++, strings++, + 
bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[i + - NUM_MAIN_STATS + - NUM_PORT_STATS]); - } else - for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) { - if ((priv->stats_bitmap >> i) & 1) { - strcpy(data + - (index++) * ETH_GSTRING_LEN, - main_strings[i]); - } - if (!(priv->stats_bitmap >> i)) - break; - } + main_strings[strings]); + for (i = 0; i < priv->tx_ring_num; i++) { sprintf(data + (index++) * ETH_GSTRING_LEN, "tx%d_packets", i); @@ -885,6 +1000,12 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, priv->prof->rx_ppp); if (err) en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + priv->prof->rx_ppp, + priv->prof->rx_pause, + priv->prof->tx_ppp, + priv->prof->tx_pause); return err; } @@ -1818,6 +1939,32 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, return 0; } +static int mlx4_en_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + int err; + u16 beacon_duration; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON)) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + beacon_duration = PORT_BEACON_MAX_LIMIT; + break; + case ETHTOOL_ID_INACTIVE: + beacon_duration = 0; + break; + default: + return -EOPNOTSUPP; + } + + err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration); + return err; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -1827,6 +1974,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_sset_count = mlx4_en_get_sset_count, .get_ethtool_stats = mlx4_en_get_ethtool_stats, .self_test = mlx4_en_self_test, + .set_phys_id = mlx4_en_set_phys_id, .get_wol = mlx4_en_get_wol, .set_wol = mlx4_en_set_wol, .get_msglevel = mlx4_en_get_msglevel, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 58d5a07d0ff4..913b716ed2e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -103,6 +103,11 @@ void mlx4_en_update_loopback_state(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); + if (features & NETIF_F_LOOPBACK) + priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); + else + priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED| MLX4_EN_FLAG_ENABLE_HW_LOOPBACK); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..0f1afc085d58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev) } /* Attach rx QP to bradcast address */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, priv->port, 0, MLX4_PROT_ETH, @@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); - mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); - #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == 
MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); @@ -1788,7 +1786,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } /* Detach All multicasts */ - memset(&mc_list[10], 0xff, ETH_ALEN); + eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, priv->broadcast_id); @@ -1890,6 +1888,12 @@ static void mlx4_en_clear_stats(struct net_device *dev) memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); memset(&priv->port_stats, 0, sizeof(priv->port_stats)); + memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); + memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); + memset(&priv->rx_priority_flowstats, 0, + sizeof(priv->rx_priority_flowstats)); + memset(&priv->tx_priority_flowstats, 0, + sizeof(priv->tx_priority_flowstats)); for (i = 0; i < priv->tx_ring_num; i++) { priv->tx_ring[i]->bytes = 0; @@ -2191,31 +2195,50 @@ static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(netdev); + bool reset = false; int ret = 0; + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { + en_info(priv, "Turn %s RX-FCS\n", + (features & NETIF_F_RXFCS) ? "ON" : "OFF"); + reset = true; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { + u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; + + en_info(priv, "Turn %s RX-ALL\n", + ignore_fcs_value ? "ON" : "OFF"); + ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, + priv->port, ignore_fcs_value); + if (ret) + return ret; + } + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { en_info(priv, "Turn %s RX vlan strip offload\n", (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); - ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, - features); - if (ret) - return ret; + reset = true; } if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) en_info(priv, "Turn %s TX vlan strip offload\n", (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); - if (features & NETIF_F_LOOPBACK) - priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); - else - priv->ctrl_flags &= - cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { + en_info(priv, "Turn %s loopback\n", + (features & NETIF_F_LOOPBACK) ? 
"ON" : "OFF"); + mlx4_en_update_loopback_state(netdev, features); + } - mlx4_en_update_loopback_state(netdev, features); + if (reset) { + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } return 0; - } static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) @@ -2238,6 +2261,16 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); } +static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, + max_tx_rate); +} + static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) { struct mlx4_en_priv *en_priv = netdev_priv(dev); @@ -2375,10 +2408,38 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { + features = vlan_features_check(skb, features); return vxlan_features_check(skb, features); } #endif +static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; + struct mlx4_update_qp_params params; + int err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) + return -EOPNOTSUPP; + + /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ + if (maxrate >> 12) { + params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; + params.rate_val = maxrate / 1000; + } else if (maxrate) { + params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; + params.rate_val = maxrate; + } else { /* zero serves to revoke the QP rate-limitation */ + params.rate_unit = 0; + params.rate_val = 0; + } + + err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, + ¶ms); + return err; +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2410,6 +2471,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, #endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2427,6 +2489,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, + .ndo_set_vf_rate = mlx4_en_set_vf_rate, .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, .ndo_get_vf_config = mlx4_en_get_vf_config, @@ -2444,6 +2507,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, #endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, }; struct mlx4_en_bond { @@ -2620,6 +2684,82 @@ int mlx4_en_netdev_event(struct notifier_block *this, return NOTIFY_DONE; } +void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = NUM_MAIN_STATS + NUM_PORT_STATS; + + if (!mlx4_is_slave(dev) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { + mutex_lock(&stats_bitmap->mutex); + bitmap_clear(stats_bitmap->bitmap, 
last_i, NUM_FLOW_STATS); + + if (rx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_RX); + last_i += NUM_FLOW_PRIORITY_STATS_RX; + + if (rx_pause && !(rx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_RX); + last_i += NUM_FLOW_STATS_RX; + + if (tx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_TX); + last_i += NUM_FLOW_PRIORITY_STATS_TX; + + if (tx_pause && !(tx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_TX); + last_i += NUM_FLOW_STATS_TX; + + mutex_unlock(&stats_bitmap->mutex); + } +} + +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = 0; + + mutex_init(&stats_bitmap->mutex); + bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); + + if (mlx4_is_slave(dev)) { + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_dropped), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_dropped), 1); + } else { + bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); + } + last_i += NUM_MAIN_STATS; + + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); + last_i += NUM_PORT_STATS; + + mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, + rx_ppp, rx_pause, + tx_ppp, tx_pause); + last_i += NUM_FLOW_STATS; + + if (!mlx4_is_slave(dev)) + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); +} + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { @@ -2695,7 +2835,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { - if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { en_info(priv, "enabling only PFC DCB ops\n"); @@ -2782,6 +2922,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) + dev->hw_features |= NETIF_F_RXFCS; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) + dev->hw_features |= NETIF_F_RXALL; + if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) @@ -2807,13 +2953,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, netif_carrier_off(dev); mlx4_en_set_default_moderation(priv); - err = register_netdev(dev); - if (err) { - en_err(priv, "Netdev registration failed for port %d\n", port); - goto out; - } - priv->registered = 1; - en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); @@ -2853,6 +2992,20 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); + mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, + mdev->profile.prof[priv->port].rx_ppp, + mdev->profile.prof[priv->port].rx_pause, + 
mdev->profile.prof[priv->port].tx_ppp, + mdev->profile.prof[priv->port].tx_pause); + + err = register_netdev(dev); + if (err) { + en_err(priv, "Netdev registration failed for port %d\n", port); + goto out; + } + + priv->registered = 1; + return 0; out: @@ -2871,7 +3024,8 @@ int mlx4_en_reset_config(struct net_device *dev, if (priv->hwtstamp_config.tx_type == ts_config.tx_type && priv->hwtstamp_config.rx_filter == ts_config.rx_filter && - !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) return 0; /* Nothing to change */ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && @@ -2910,6 +3064,13 @@ int mlx4_en_reset_config(struct net_device *dev, dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; } + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { + if (features & NETIF_F_RXFCS) + dev->features |= NETIF_F_RXFCS; + else + dev->features &= ~NETIF_F_RXFCS; + } + /* RX vlan offload and RX time-stamping can't co-exist ! * Regardless of the caller's choice, * Turn Off RX vlan offload in case of time-stamping is ON diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 6cb80072af6c..54f0e5ab2e55 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -128,9 +128,29 @@ out: return err; } +/* Each counter set is located in struct mlx4_en_stat_out_mbox + * with a const offset between its prio components. + * This function runs over a counter set and sums all of its prio components. + */ +static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) +{ + __be64 *curr = start; + unsigned long ret = 0; + int i; + int offset = next - start; + + for (i = 0; i < num; i++) { + ret += be64_to_cpu(*curr); + curr += offset; + } + + return ret; +} + int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) { struct mlx4_en_stat_out_mbox *mlx4_en_stats; + struct mlx4_en_stat_out_flow_control_mbox *flowstats; struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); struct net_device_stats *stats = &priv->stats; struct mlx4_cmd_mailbox *mailbox; @@ -183,22 +203,25 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.xmit_more += ring->xmit_more; } + /* net device stats */ stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + - be32_to_cpu(mlx4_en_stats->RdropLength) + be32_to_cpu(mlx4_en_stats->RJBBR) + be32_to_cpu(mlx4_en_stats->RCRC) + - be32_to_cpu(mlx4_en_stats->RRUNT); - stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP); - stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_1) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_2) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_3) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_4) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_5) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_6) + - be64_to_cpu(mlx4_en_stats->MCAST_prio_7) + - be64_to_cpu(mlx4_en_stats->MCAST_novlan); + be32_to_cpu(mlx4_en_stats->RRUNT) + + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) + + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) + + be32_to_cpu(mlx4_en_stats->RSHORT) + + en_stats_adder(&mlx4_en_stats->RGIANT_prio_0, + &mlx4_en_stats->RGIANT_prio_1, + NUM_PRIORITIES); + stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0, + &mlx4_en_stats->TGIANT_prio_1, + NUM_PRIORITIES); + stats->multicast =
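en_stats_adder, added above, leans on the fixed layout of struct mlx4_en_stat_out_mbox: the per-priority copies of one counter sit a constant number of __be64 slots apart, so the distance between the prio_0 and prio_1 fields doubles as the stride for walking the whole set, trailing novlan entry included. Below is a host-endian toy model of the same walk; the struct and values are invented for illustration, and the be64_to_cpu conversion is dropped.

#include <stdio.h>

/* Toy mailbox: two interleaved counter sets, each with three
 * priorities plus a novlan slot, so the stride is two fields. */
struct toy_mbox {
	unsigned long mcast_prio_0, bcast_prio_0;
	unsigned long mcast_prio_1, bcast_prio_1;
	unsigned long mcast_prio_2, bcast_prio_2;
	unsigned long mcast_novlan, bcast_novlan;
};

/* Same walk as en_stats_adder: 'next - start' gives the stride in
 * fields, and num counts all entries of the set (prios + novlan). */
static unsigned long stats_adder(unsigned long *start,
				 unsigned long *next, int num)
{
	unsigned long *curr = start, ret = 0;
	int i, offset = next - start;

	for (i = 0; i < num; i++) {
		ret += *curr;
		curr += offset;
	}
	return ret;
}

int main(void)
{
	struct toy_mbox m = { 1, 10, 2, 20, 3, 30, 4, 40 };

	/* mcast_prio_0..2 + mcast_novlan = 1 + 2 + 3 + 4 = 10 */
	printf("%lu\n", stats_adder(&m.mcast_prio_0, &m.mcast_prio_1, 4));
	return 0;
}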
en_stats_adder(&mlx4_en_stats->MCAST_prio_0, + &mlx4_en_stats->MCAST_prio_1, + NUM_PRIORITIES); stats->collisions = 0; + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); @@ -210,33 +233,116 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->tx_fifo_errors = 0; stats->tx_heartbeat_errors = 0; stats->tx_window_errors = 0; + stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); + + /* RX stats */ + priv->pkstats.rx_multicast_packets = stats->multicast; + priv->pkstats.rx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->RBCAST_prio_0, + &mlx4_en_stats->RBCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR); + priv->pkstats.rx_in_range_length_error = + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr); + priv->pkstats.rx_out_range_length_error = + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr); + + /* Tx stats */ + priv->pkstats.tx_multicast_packets = + en_stats_adder(&mlx4_en_stats->TMCAST_prio_0, + &mlx4_en_stats->TMCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.tx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->TBCAST_prio_0, + &mlx4_en_stats->TBCAST_prio_1, + NUM_PRIORITIES); + + priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); + priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0); + priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); + priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1); + priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); + priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2); + priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); + priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3); + priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); + priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4); + priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); + priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5); + priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); + priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6); + priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); + priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7); + priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan); + priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan); + priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); + priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0); + priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); + priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1); + priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); + priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2); + priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); + priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3); + priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); + priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4); + priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); + priv->pkstats.tx_prio[5][1] = 
be64_to_cpu(mlx4_en_stats->TOCT_prio_5); + priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); + priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6); + priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); + priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7); + priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); + priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); + + spin_unlock_bh(&priv->stats_lock); + + /* 0xff's indicate an invalid value */ + memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { + memset(mailbox->buf, 0, + sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, + in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, + 0, MLX4_CMD_DUMP_ETH_STATS, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err) + goto out; + } + + flowstats = mailbox->buf; + + spin_lock_bh(&priv->stats_lock); + + for (i = 0; i < MLX4_NUM_PRIORITIES; i++) { + priv->rx_priority_flowstats[i].rx_pause = + be64_to_cpu(flowstats[i].rx_pause); + priv->rx_priority_flowstats[i].rx_pause_duration = + be64_to_cpu(flowstats[i].rx_pause_duration); + priv->rx_priority_flowstats[i].rx_pause_transition = + be64_to_cpu(flowstats[i].rx_pause_transition); + priv->tx_priority_flowstats[i].tx_pause = + be64_to_cpu(flowstats[i].tx_pause); + priv->tx_priority_flowstats[i].tx_pause_duration = + be64_to_cpu(flowstats[i].tx_pause_duration); + priv->tx_priority_flowstats[i].tx_pause_transition = + be64_to_cpu(flowstats[i].tx_pause_transition); + } + + /* if PFC is not in use, all priority counters have the same value */ + priv->rx_flowstats.rx_pause = + be64_to_cpu(flowstats[0].rx_pause); + priv->rx_flowstats.rx_pause_duration = + be64_to_cpu(flowstats[0].rx_pause_duration); + priv->rx_flowstats.rx_pause_transition = + be64_to_cpu(flowstats[0].rx_pause_transition); + priv->tx_flowstats.tx_pause = + be64_to_cpu(flowstats[0].tx_pause); + priv->tx_flowstats.tx_pause_duration = + be64_to_cpu(flowstats[0].tx_pause_duration); + priv->tx_flowstats.tx_pause_transition = + be64_to_cpu(flowstats[0].tx_pause_transition); - priv->pkstats.broadcast = - be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) + - be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) + - be64_to_cpu(mlx4_en_stats->RBCAST_novlan); - priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); - priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); - priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); - priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); - priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); - priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); - priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); - priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); - priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); - priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); - priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); - priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); - priv->pkstats.tx_prio[4] =
be64_to_cpu(mlx4_en_stats->TTOT_prio_4); - priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); - priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); - priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); spin_unlock_bh(&priv->stats_lock); out: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 698d60de1255..4fdd3c37e47b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -771,7 +771,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* * make sure we read the CQE after we read the ownership bit */ - rmb(); + dma_rmb(); /* Drop packet on bad receive or bad checksum */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == @@ -1116,7 +1116,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, /* Cancel FCS removal if FW allows */ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { context->param3 |= cpu_to_be32(1 << 29); - ring->fcs_del = ETH_FCS_LEN; + if (priv->dev->features & NETIF_F_RXFCS) + ring->fcs_del = 0; + else + ring->fcs_del = ETH_FCS_LEN; } else ring->fcs_del = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2d8ee66138e8..b66e03d9711f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); packet = (unsigned char *)skb_put(skb, packet_size); memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); - memset(ethh->h_source, 0, ETH_ALEN); + eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_set_mac_header(skb, 0); for (i = 0; i < packet_size; ++i) /* fill our packet */ @@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) { u32 loopback_ok = 0; int i; - + bool gro_enabled; priv->loopback_ok = 0; priv->validate_loopback = 1; + gro_enabled = priv->dev->features & NETIF_F_GRO; mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + priv->dev->features &= ~NETIF_F_GRO; /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { @@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) mlx4_en_test_loopback_exit: priv->validate_loopback = 0; + + if (gro_enabled) + priv->dev->features |= NETIF_F_GRO; + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); return !loopback_ok; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 55f9f5c5344e..1783705273d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -416,7 +416,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, * make sure we read the CQE after we read the * ownership bit */ - rmb(); + dma_rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { @@ -667,7 +667,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, skb_frag_size(&shinfo->frags[0])); } - wmb(); + dma_wmb(); inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); } } @@ -804,7 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) data->addr = cpu_to_be64(dma); data->lkey = ring->mr_key; - wmb(); + dma_wmb(); data->byte_count = cpu_to_be32(byte_count); --data; } @@ -821,7 +821,7 @@ netdev_tx_t 
mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) data->addr = cpu_to_be64(dma); data->lkey = ring->mr_key; - wmb(); + dma_wmb(); data->byte_count = cpu_to_be32(byte_count); } /* tx completion can avoid cache line miss for common cases */ @@ -938,7 +938,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Ensure new descriptor hits memory * before setting ownership of this descriptor to HW */ - wmb(); + dma_wmb(); tx_desc->ctrl.owner_opcode = op_own; wmb(); @@ -958,7 +958,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Ensure new descriptor hits memory * before setting ownership of this descriptor to HW */ - wmb(); + dma_wmb(); tx_desc->ctrl.owner_opcode = op_own; if (send_doorbell) { wmb(); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 264bc15c1ff2..2619c9fbf42d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work) /* All active slaves need to receive the event */ if (slave == ALL_SLAVES) { - for (i = 0; i < dev->num_slaves; i++) { - if (i != dev->caps.function && - master->slave_state[i].active) - if (mlx4_GEN_EQE(dev, i, eqe)) - mlx4_warn(dev, "Failed to generate event for slave %d\n", - i); + for (i = 0; i <= dev->persist->num_vfs; i++) { + if (mlx4_GEN_EQE(dev, i, eqe)) + mlx4_warn(dev, "Failed to generate event for slave %d\n", + i); } } else { if (mlx4_GEN_EQE(dev, slave, eqe)) @@ -190,7 +188,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); s_eqe->slave_id = slave; /* ensure all information is written before setting the ownership bit */ - wmb(); + dma_wmb(); s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80; ++slave_eq->prod; @@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_slave_state *s_slave = - &priv->mfunc.master.slave_state[slave]; - if (!s_slave->active) { - /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) return; - } slave_event(dev, slave, eqe); } @@ -477,7 +473,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) * Make sure we read EQ entry contents after we've * checked the ownership bit.
*/ - rmb(); + dma_rmb(); switch (eqe->type) { case MLX4_EVENT_TYPE_COMP: @@ -706,6 +702,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; } spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, + flr_slave); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_flr_event_work); break; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 5a21e5dc94cb..a4079811b176 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -49,9 +49,9 @@ enum { extern void __buggy_use_of_MLX4_GET(void); extern void __buggy_use_of_MLX4_PUT(void); -static bool enable_qos; +static bool enable_qos = true; module_param(enable_qos, bool, 0444); -MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); +MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); #define MLX4_GET(dest, source, offset) \ do { \ @@ -105,6 +105,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) [41] = "Unicast VEP steering support", [42] = "Multicast VEP steering support", [48] = "Counters support", + [52] = "RSS IP fragments support", [53] = "Port ETS Scheduler support", [55] = "Port link type sensing support", [59] = "Port management change event support", @@ -143,7 +144,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [18] = "More than 80 VFs support", [19] = "Performance optimized for limited rule configuration flow steering support", [20] = "Recoverable error events support", - [21] = "Port Remap support" + [21] = "Port Remap support", + [22] = "QCN support", + [23] = "QP rate limiting support", + [24] = "Ethernet Flow control statistics support", + [25] = "Granular QoS per VF support", + [26] = "Port ETS Scheduler support", + [27] = "Port beacon support", + [28] = "RX-ALL support", }; int i; @@ -641,6 +649,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSS_OFFSET 0x2e #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 @@ -670,12 +679,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 +#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a -#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a +#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -696,6 +706,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac +#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc +#define 
QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 + dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -768,15 +782,24 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->num_ports = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); + if (field & 0x10) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); dev_cap->fs_max_num_qp_per_entry = field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + if (field & 0x1) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); @@ -856,6 +879,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 4)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP; if (field & (1 << 5)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; if (field & (1 << 6)) @@ -869,6 +894,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); if (field & 0x20) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; + if (field & (1 << 2)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); @@ -882,6 +909,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); if (field & 1<<3) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) @@ -900,6 +929,18 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + dev_cap->rl_caps.num_rates = size; + if (dev_cap->rl_caps.num_rates) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET); + dev_cap->rl_caps.max_val = size & 0xfff; + dev_cap->rl_caps.max_unit = size >> 14; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET); + dev_cap->rl_caps.min_val = size & 0xfff; + dev_cap->rl_caps.min_unit = size >> 14; + } + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; @@ -975,6 +1016,15 @@ void mlx4_dev_cap_dump(struct mlx4_dev *dev, 
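The QP rate-limit capability halfwords parsed above pack a 12-bit rate value into bits 11:0 and a 2-bit unit selector into bits 15:14. A tiny decode example; the raw value is invented, only the masking and shifting mirror the MLX4_GET handling above.

#include <stdio.h>
#include <stdint.h>

struct rl_field {
	uint16_t val;	/* bits 11:0 */
	uint8_t unit;	/* bits 15:14 */
};

static struct rl_field decode_rl(uint16_t raw)
{
	struct rl_field f;

	f.val = raw & 0xfff;
	f.unit = raw >> 14;
	return f;
}

int main(void)
{
	/* 0x8FA0 -> unit = 0b10 = 2, val = 0xFA0 = 4000 */
	struct rl_field f = decode_rl(0x8FA0);

	printf("unit=%u val=%u\n", f.unit, f.val);
	return 0;
}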
struct mlx4_dev_cap *dev_cap) dev_cap->dmfs_high_rate_qpn_base); mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", dev_cap->dmfs_high_rate_qpn_range); + + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { + struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; + + mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", + rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, + rl_caps->min_unit, rl_caps->min_val); + } + dump_dev_cap_flags(dev, dev_cap->flags); dump_dev_cap_flags2(dev, dev_cap->flags2); } @@ -1058,6 +1108,7 @@ out: return err; } +#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28) #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) #define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) #define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) @@ -1071,6 +1122,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, u64 flags; int err = 0; u8 field; + u16 field16; u32 bmme_flags, field32; int real_port; int slave_port; @@ -1101,6 +1153,9 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, } for (; slave_port < dev->caps.num_ports; ++slave_port) flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + + /* Not exposing RSS IP fragments to guests */ + flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG; MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); @@ -1113,11 +1168,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); - /* For guests, disable vxlan tunneling */ + /* For guests, disable vxlan tunneling and QoS support */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); - field &= 0xf7; + field &= 0xd7; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); + /* For guests, disable port BEACON */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + /* For guests, report Blueflame disabled */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); field &= 0x7f; @@ -1146,9 +1206,28 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, /* turn off host side virt features (VST, FSM, etc) for guests */ MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | - DEV_CAP_EXT_2_FLAG_FSM); + DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS); MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + /* turn off QCN for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + field &= 0xfe; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + + /* turn off QP max-rate limiting for guests */ + field16 = 0; + MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + + /* turn off QoS per VF support for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + field &= 0xef; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + + /* turn off ignore FCS feature for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + field &= 0xfb; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + return 0; } @@ -1648,13 +1727,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); /* Enable QoS support if module parameter set */ - if (enable_qos) + if (dev->caps.flags2 & 
MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); /* enable counters */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); + /* Enable RSS spread to fragmented IP packets when supported */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); + /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); @@ -1843,6 +1926,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, else param->steering_mode = MLX4_STEERING_MODE_A0; } + + if (dword_field & (1 << 13)) + param->rss_ip_frags = 1; + /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index f44f7f6017ed..07cb7c2461ad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -127,6 +127,7 @@ struct mlx4_dev_cap { u32 max_counters; u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; + struct mlx4_rate_limit_caps rl_caps; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; }; @@ -202,6 +203,7 @@ struct mlx4_init_hca_param { u64 dev_cap_enabled; u16 cqe_size; /* For use only when CQE stride feature enabled */ u16 eqe_size; /* For use only when EQE stride feature enabled */ + u8 rss_ip_frags; }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c new file mode 100644 index 000000000000..8f2fde0487c4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/export.h> +#include "fw_qos.h" +#include "fw.h" + +enum { + /* allocate vpp opcode modifiers */ + MLX4_ALLOCATE_VPP_ALLOCATE = 0x0, + MLX4_ALLOCATE_VPP_QUERY = 0x1 +}; + +enum { + /* set vport qos opcode modifiers */ + MLX4_SET_VPORT_QOS_SET = 0x0, + MLX4_SET_VPORT_QOS_QUERY = 0x1 +}; + +struct mlx4_set_port_prio2tc_context { + u8 prio2tc[4]; +}; + +struct mlx4_port_scheduler_tc_cfg_be { + __be16 pg; + __be16 bw_precentage; + __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */ + __be16 max_bw_value; +}; + +struct mlx4_set_port_scheduler_context { + struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC]; +}; + +/* Granular QoS (per VF) section */ +struct mlx4_alloc_vpp_param { + __be32 availible_vpp; + __be32 vpp_p_up[MLX4_NUM_UP]; +}; + +struct mlx4_prio_qos_param { + __be32 bw_share; + __be32 max_avg_bw; + __be32 reserved; + __be32 enable; + __be32 reserved1[4]; +}; + +struct mlx4_set_vport_context { + __be32 reserved[8]; + struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP]; +}; + +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_prio2tc_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i += 2) + context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; + + in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); + +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_scheduler_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_TC; i++) { + struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; + u16 r; + + if (ratelimit && ratelimit[i]) { + if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) { + r = ratelimit[i]; + tc->max_bw_units = + htons(MLX4_RATELIMIT_100M_UNITS); + } else { + r = ratelimit[i] / 10; + tc->max_bw_units = + htons(MLX4_RATELIMIT_1G_UNITS); + } + tc->max_bw_value = htons(r); + } else { + tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT); + tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS); + } + + tc->pg = htons(pg[i]); + tc->bw_precentage = htons(tc_tx_bw[i]); + } + + in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); + +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *out_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + out_param = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, + MLX4_ALLOCATE_VPP_QUERY, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + /* Total number of supported VPPs */ + *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + vpp_p_up[i] =
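mlx4_SET_PORT_SCHEDULER above picks one of two granularities per TC: a small enough ratelimit is programmed directly in 100 Mb/s units, a larger one is divided by 10 and programmed in 1 Gb/s units. The standalone sketch below models that choice, assuming (as the division by 10 implies) that the incoming ratelimit values are expressed in units of 100 Mb/s; the threshold and unit constants are placeholders for the driver's MLX4_MAX_100M_UNITS_VAL and MLX4_RATELIMIT_* definitions.

#include <stdio.h>
#include <stdint.h>

#define MAX_100M_UNITS_VAL	255	/* placeholder threshold */
#define UNITS_100M		3	/* per the comment above: 3 = 100 Mb/s */
#define UNITS_1G		4	/* 4 = 1 Gb/s */

/* Map a ratelimit (in 100 Mb/s units) to a (units, value) pair. */
static void pick_tc_rate(uint16_t ratelimit, uint16_t *units, uint16_t *value)
{
	if (ratelimit <= MAX_100M_UNITS_VAL) {
		*units = UNITS_100M;
		*value = ratelimit;
	} else {
		*units = UNITS_1G;
		*value = ratelimit / 10;	/* 100 Mb/s -> 1 Gb/s units */
	}
}

int main(void)
{
	uint16_t u, v;

	pick_tc_rate(100, &u, &v);	/* 10 Gb/s -> 100 x 100 Mb/s */
	printf("units=%u value=%u\n", u, v);
	pick_tc_rate(400, &u, &v);	/* 40 Gb/s -> 40 x 1 Gb/s */
	printf("units=%u value=%u\n", u, v);
	return 0;
}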
(u8)be32_to_cpu(out_param->vpp_p_up[i]); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get); + +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *in_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + in_param = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) + in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]); + + err = mlx4_cmd(dev, mailbox->dma, port, + MLX4_ALLOCATE_VPP_ALLOCATE, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set); + +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_QUERY, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + for (i = 0; i < MLX4_NUM_UP; i++) { + out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share); + out_param[i].max_avg_bw = + be32_to_cpu(ctx->qos_p_up[i].max_avg_bw); + out_param[i].enable = + !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get); + +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) { + ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share); + ctx->qos_p_up[i].max_avg_bw = + cpu_to_be32(in_param[i].max_avg_bw); + ctx->qos_p_up[i].enable = + cpu_to_be32(in_param[i].enable << 31); + } + + err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_SET, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h new file mode 100644 index 000000000000..ac1f331878e6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_FW_QOS_H +#define MLX4_FW_QOS_H + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/device.h> + +#define MLX4_NUM_UP 8 +#define MLX4_NUM_TC 8 + +/* Default supported priorities for VPP allocation */ +#define MLX4_DEFAULT_QOS_PRIO (0) + +/* Derived from FW feature definition, 0 is the default vport for all QPs */ +#define MLX4_VPP_DEFAULT_VPORT (0) + +struct mlx4_vport_qos_param { + u32 bw_share; + u32 max_avg_bw; + u8 enable; +}; + +/** + * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic + * classes of a given port and device. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @prio2tc: Array of TCs associated with each priority. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); + +/** + * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between + * traffic classes (ETS) and configured rate limit for traffic classes. + * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC. + * The description for those parameters below refers to a single TC. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @tc_tx_bw: The percentage of the bandwidth allocated for traffic class + * within a TC group. The sum of the bw_percentage of all the traffic + * classes within a TC group must equal 100% for correct operation. + * @pg: The TC group the traffic class is associated with. + * @ratelimit: The maximal bandwidth allowed for use by this traffic class. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit); +/** + * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation. + * Before distribution of VPPs to priorities, only availible_vpp is returned. + * After initialization it returns the distribution of VPPs among priorities. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @availible_vpp: Pointer to variable where number of available VPPs is stored + * @vpp_p_up: Distribution of VPPs to priorities is stored in this array + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up); +/** + * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities. + * The total number of VPPs assigned to all priorities of a port must not exceed + * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get. + * VPP allocation is allowed only after the port type has been set, + * and while no QPs are open for this port. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vpp_p_up: Allocation of VPPs to different priorities.
+ * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up); + +/** + * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport. + * Each priority allowed for the Vport is assigned a share of the BW, + * and a BW limitation. This command queries the current QoS values. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @out_param: Array of mlx4_vport_qos_param that will contain the values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param); + +/** + * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport. + * QoS parameters can be modified at any time, but must be initialized + * before any QP is associated with the VPort. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @in_param: Array of mlx4_vport_qos_param which holds the requested values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param); + +#endif /* MLX4_FW_QOS_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e487223489a..ced5ecab5aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -297,6 +297,25 @@ static int mlx4_dev_port(struct mlx4_dev *dev, int port, return err; } +static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) +{ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) + return; + + if (mlx4_is_mfunc(dev)) { + mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { + mlx4_dbg(dev, + "Keep FCS is not supported - Disabling Ignore FCS\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } +} + #define MLX4_A0_STEERING_TABLE_SIZE 256 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { @@ -489,6 +508,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; } + dev->caps.rl_caps = dev_cap->rl_caps; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = dev->caps.dmfs_high_rate_qpn_range; @@ -526,10 +547,20 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.alloc_res_qp_mask = (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | MLX4_RESERVE_A0_QP; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && + dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + mlx4_warn(dev, "Old device ETS support detected\n"); + mlx4_warn(dev, "Consider upgrading device FW.\n"); + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; + } + } else { dev->caps.alloc_res_qp_mask = 0; } + mlx4_enable_ignore_fcs(dev); + return 0; } @@ -883,6 +914,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); slave_adjust_steering_mode(dev, &dev_cap, &hca_param); + mlx4_dbg(dev, "RSS support for IP fragments is %s\n", + hca_param.rss_ip_frags ?
"on" : "off"); if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && dev->caps.bf_reg_size) @@ -2227,6 +2260,37 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) } EXPORT_SYMBOL_GPL(mlx4_counter_free); +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} +EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); + +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->mfunc.master.vf_admin[entry].vport[port].guid; +} +EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); + +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 guid; + + /* hw GUID */ + if (entry == 0) + return; + + get_random_bytes((char *)&guid, sizeof(guid)); + guid &= ~(cpu_to_be64(1ULL << 56)); + guid |= cpu_to_be64(1ULL << 57); + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} + static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 1409d0cd6143..502d3dd2c888 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -50,6 +50,7 @@ #include <linux/mlx4/driver.h> #include <linux/mlx4/doorbell.h> #include <linux/mlx4/cmd.h> +#include "fw_qos.h" #define DRV_NAME "mlx4_core" #define PFX DRV_NAME ": " @@ -64,21 +65,6 @@ #define INIT_HCA_TPT_MW_ENABLE (1 << 7) -struct mlx4_set_port_prio2tc_context { - u8 prio2tc[4]; -}; - -struct mlx4_port_scheduler_tc_cfg_be { - __be16 pg; - __be16 bw_precentage; - __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */ - __be16 max_bw_value; -}; - -struct mlx4_set_port_scheduler_context { - struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC]; -}; - enum { MLX4_HCR_BASE = 0x80680, MLX4_HCR_SIZE = 0x0001c, @@ -175,7 +161,7 @@ enum mlx4_res_tracker_free_type { /* *Virtual HCR structures. - * mlx4_vhcr is the sw representation, in machine endianess + * mlx4_vhcr is the sw representation, in machine endianness * * mlx4_vhcr_cmd is the formalized structure, the one that is passed * to FW to go through communication channel. 
@@ -512,6 +498,8 @@ struct mlx4_vport_state { u32 tx_rate; bool spoofchk; u32 link_state; + u8 qos_vport; + __be64 guid; }; struct mlx4_vf_admin_state { @@ -568,6 +556,11 @@ struct mlx4_slave_event_eq { struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; }; +struct mlx4_qos_manager { + int num_of_qos_vfs; + DECLARE_BITMAP(priority_bm, MLX4_NUM_UP); +}; + struct mlx4_master_qp0_state { int proxy_qp0_active; int qp0_active; @@ -592,6 +585,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_eqe cmd_eqe; struct mlx4_slave_event_eq slave_eq; struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; + struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1]; }; struct mlx4_mfunc { @@ -644,6 +638,7 @@ struct mlx4_vf_immed_vlan_work { int orig_vlan_ix; u8 port; u8 qos; + u8 qos_vport; u16 vlan_id; u16 orig_vlan_id; }; @@ -769,9 +764,11 @@ enum { struct mlx4_set_port_general_context { - u8 reserved[3]; + u16 reserved1; + u8 v_ignore_fcs; u8 flags; - u16 reserved2; + u8 ignore_fcs; + u8 reserved2; __be16 mtu; u8 pptx; u8 pfctx; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e6be15..9de30216b146 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -55,6 +55,7 @@ #include <linux/mlx4/cmd.h> #include "en_port.h" +#include "mlx4_stats.h" #define DRV_NAME "mlx4_en" #define DRV_VERSION "2.2-1" @@ -171,7 +172,6 @@ enum { /* Number of samples to 'average' */ #define AVG_SIZE 128 #define AVG_FACTOR 1024 -#define NUM_PERF_STATS NUM_PERF_COUNTERS #define INC_PERF_COUNTER(cnt) (++(cnt)) #define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) @@ -182,7 +182,6 @@ enum { #else -#define NUM_PERF_STATS 0 #define INC_PERF_COUNTER(cnt) do {} while (0) #define ADD_PERF_COUNTER(cnt, add) do {} while (0) #define AVG_PERF_COUNTER(cnt, sample) do {} while (0) @@ -435,37 +434,6 @@ struct mlx4_en_port_state { u32 flags; }; -struct mlx4_en_pkt_stats { - unsigned long broadcast; - unsigned long rx_prio[8]; - unsigned long tx_prio[8]; -#define NUM_PKT_STATS 17 -}; - -struct mlx4_en_port_stats { - unsigned long tso_packets; - unsigned long xmit_more; - unsigned long queue_stopped; - unsigned long wake_queue; - unsigned long tx_timeout; - unsigned long rx_alloc_failed; - unsigned long rx_chksum_good; - unsigned long rx_chksum_none; - unsigned long rx_chksum_complete; - unsigned long tx_chksum_offload; -#define NUM_PORT_STATS 9 -}; - -struct mlx4_en_perf_stats { - u32 tx_poll; - u64 tx_pktsz_avg; - u32 inflight_avg; - u16 tx_coal_avg; - u16 rx_coal_avg; - u32 napi_quota; -#define NUM_PERF_COUNTERS 6 -}; - enum mlx4_en_mclist_act { MCLIST_NONE, MCLIST_REM, @@ -514,9 +482,15 @@ enum { MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), }; +#define PORT_BEACON_MAX_LIMIT (65535) #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) #define MLX4_EN_MAC_HASH_IDX 5 +struct mlx4_en_stats_bitmap { + DECLARE_BITMAP(bitmap, NUM_ALL_STATS); + struct mutex mutex; /* for mutual access to stats bitmap */ +}; + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -592,8 +566,12 @@ struct mlx4_en_priv { #endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; + struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_rx rx_flowstats; + struct mlx4_en_flow_stats_tx tx_flowstats; struct mlx4_en_port_stats port_stats; - u64 stats_bitmap; + struct mlx4_en_stats_bitmap stats_bitmap; struct list_head 
mc_list; struct list_head curr_list; u64 broadcast_id; @@ -608,6 +586,7 @@ struct mlx4_en_priv { #ifdef CONFIG_MLX4_EN_DCB struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; + enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; @@ -761,6 +740,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, int mlx4_en_start_port(struct net_device *dev); void mlx4_en_stop_port(struct net_device *dev, int detach); +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); + void mlx4_en_free_resources(struct mlx4_en_priv *priv); int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); @@ -846,7 +830,10 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); int mlx4_en_reset_config(struct net_device *dev, struct hwtstamp_config ts_config, netdev_features_t new_features); - +void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); int mlx4_en_netdev_event(struct notifier_block *this, unsigned long event, void *ptr); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h new file mode 100644 index 000000000000..00555832a4ae --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -0,0 +1,107 @@ +#ifndef _MLX4_STATS_ +#define _MLX4_STATS_ + +#ifdef MLX4_EN_PERF_STAT +#define NUM_PERF_STATS NUM_PERF_COUNTERS +#else +#define NUM_PERF_STATS 0 +#endif + +#define NUM_PRIORITIES 9 +#define NUM_PRIORITY_STATS 2 + +struct mlx4_en_pkt_stats { + unsigned long rx_multicast_packets; + unsigned long rx_broadcast_packets; + unsigned long rx_jabbers; + unsigned long rx_in_range_length_error; + unsigned long rx_out_range_length_error; + unsigned long tx_multicast_packets; + unsigned long tx_broadcast_packets; + unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; + unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; +#define NUM_PKT_STATS 43 +}; + +struct mlx4_en_port_stats { + unsigned long tso_packets; + unsigned long xmit_more; + unsigned long queue_stopped; + unsigned long wake_queue; + unsigned long tx_timeout; + unsigned long rx_alloc_failed; + unsigned long rx_chksum_good; + unsigned long rx_chksum_none; + unsigned long rx_chksum_complete; + unsigned long tx_chksum_offload; +#define NUM_PORT_STATS 10 +}; + +struct mlx4_en_perf_stats { + u32 tx_poll; + u64 tx_pktsz_avg; + u32 inflight_avg; + u16 tx_coal_avg; + u16 rx_coal_avg; + u32 napi_quota; +#define NUM_PERF_COUNTERS 6 +}; + +#define NUM_MAIN_STATS 21 + +#define MLX4_NUM_PRIORITIES 8 + +struct mlx4_en_flow_stats_rx { + u64 rx_pause; + u64 rx_pause_duration; + u64 rx_pause_transition; +#define NUM_FLOW_STATS_RX 3 +#define NUM_FLOW_PRIORITY_STATS_RX (NUM_FLOW_STATS_RX * \ + MLX4_NUM_PRIORITIES) +}; + +struct mlx4_en_flow_stats_tx { + u64 tx_pause; + u64 tx_pause_duration; + u64 tx_pause_transition; +#define NUM_FLOW_STATS_TX 3 +#define NUM_FLOW_PRIORITY_STATS_TX (NUM_FLOW_STATS_TX * \ + MLX4_NUM_PRIORITIES) +}; + +#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_RX) + +struct mlx4_en_stat_out_flow_control_mbox { + /* Total number of PAUSE frames received from the far-end port */ + __be64 rx_pause; + /* Total number of microseconds that far-end port requested to pause + * transmission of packets + */ + __be64 rx_pause_duration; + /* 
Number of receiver transitions from XOFF state to XON state */ + __be64 rx_pause_transition; + /* Total number of PAUSE frames sent to the far-end port */ + __be64 tx_pause; + /* Total time in microseconds that transmission of packets has been + * paused + */ + __be64 tx_pause_duration; + /* Number of transmitter transitions from XOFF state to XON state */ + __be64 tx_pause_transition; + /* Reserved */ + __be64 reserved[2]; +}; + +enum { + MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12 +}; + +#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ + NUM_FLOW_STATS + NUM_PERF_STATS) + +#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ + sizeof(((struct net_device_stats *)0)->n)) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 9f268f05290a..c2b21313dba7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -38,6 +38,7 @@ #include <linux/mlx4/cmd.h> #include "mlx4.h" +#include "mlx4_stats.h" #define MLX4_MAC_VALID (1ull << 63) @@ -49,6 +50,9 @@ #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL +#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_IGNORE_FCS_MASK 0x1 + void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) { int i; @@ -127,8 +131,9 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); return err; @@ -341,8 +346,9 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); @@ -629,9 +635,9 @@ static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, MLX4_ROCE_GID_ENTRY_SIZE); err = mlx4_cmd(dev, mailbox->dma, - ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, - MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), + MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); mutex_unlock(&(priv->port[port].gid_table.mutex)); return err; } @@ -837,6 +843,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, MLX4_CMD_NATIVE); } + /* Slaves are not allowed to SET_PORT beacon (LED) blink */ + if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) { + mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave); + return -EPERM; + } + /* For IB, we only consider: * - The capability mask, which is set to the aggregate of all * slave function capabilities @@ -945,8 +957,9 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | (vl_cap << MLX4_SET_PORT_VL_CAP)); - err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + err = mlx4_cmd(dev, mailbox->dma, 
port, + MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (err != -ENOMEM) break; } @@ -975,8 +988,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, context->pfcrx = pfcrx; in_mod = MLX4_SET_PORT_GENERAL << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; @@ -1012,84 +1026,40 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, context->vlan_miss = MLX4_VLAN_MISS_IDX; in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); mlx4_free_cmd_mailbox(dev, mailbox); return err; } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); -int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_prio2tc_context *context; - int err; + struct mlx4_set_port_general_context *context; u32 in_mod; - int i; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - context = mailbox->buf; - for (i = 0; i < MLX4_NUM_UP; i += 2) - context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; - - in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; - err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); - - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} -EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); - -int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, - u8 *pg, u16 *ratelimit) -{ - struct mlx4_cmd_mailbox *mailbox; - struct mlx4_set_port_scheduler_context *context; int err; - u32 in_mod; - int i; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; + context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + if (ignore_fcs_value) + context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; + else + context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK; - for (i = 0; i < MLX4_NUM_TC; i++) { - struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; - u16 r; - - if (ratelimit && ratelimit[i]) { - if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) { - r = ratelimit[i]; - tc->max_bw_units = - htons(MLX4_RATELIMIT_100M_UNITS); - } else { - r = ratelimit[i]/10; - tc->max_bw_units = - htons(MLX4_RATELIMIT_1G_UNITS); - } - tc->max_bw_value = htons(r); - } else { - tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT); - tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS); - } - - tc->pg = htons(pg[i]); - tc->bw_precentage = htons(tc_tx_bw[i]); - } - - in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); return err; } -EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); +EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check); enum { VXLAN_ENABLE_MODIFY = 1 << 7, @@ -1125,14 +1095,35 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) context->steering = steering; in_mod = MLX4_SET_PORT_VXLAN << 8 | port; - err = 
mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); return err; } EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + *((__be32 *)mailbox->buf) = cpu_to_be32(time); + + err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_BEACON); + int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1184,22 +1175,6 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, vhcr->in_modifier, outbox); } -void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap) -{ - if (!mlx4_is_mfunc(dev)) { - *stats_bitmap = 0; - return; - } - - *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK | - MLX4_STATS_TRAFFIC_DROPS_MASK | - MLX4_STATS_PORT_COUNTERS_MASK); - - if (mlx4_is_master(dev)) - *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; -} -EXPORT_SYMBOL(mlx4_set_stats_bitmap); - int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2bb8553bd905..b75214a80d0e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -412,7 +412,6 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); -#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params) @@ -443,6 +442,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); } + if (attr & MLX4_UPDATE_QP_RATE_LIMIT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT; + cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val); + } + + if (attr & MLX4_UPDATE_QP_QOS_VPORT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; + cmd->qp_context.qos_vport = params->qos_vport; + } + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); cmd->qp_mask = cpu_to_be64(qp_mask); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 486e3d26cd4a..c7f28bf4b8e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -221,11 +221,6 @@ struct res_fs_rule { int qpn; }; -static int mlx4_is_eth(struct mlx4_dev *dev, int port) -{ - return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1; -} - static void *res_tracker_lookup(struct rb_root *root, u64 res_id) { struct rb_node *node = root->rb_node; @@ -713,7 +708,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; u32 qp_type; - int port; + int port, err = 0; port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1; priv = mlx4_priv(dev); @@ -738,7 +733,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev, } else { struct mlx4_update_qp_params params = {.flags = 0}; - mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + if (err) + goto out; } } @@ -768,12 +765,14 @@ static int update_vport_qp_param(struct mlx4_dev *dev, qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; qpc->pri_path.sched_queue &= 0xC7; qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; + qpc->qos_vport = vp_oper->state.qos_vport; } if (vp_oper->state.spoofchk) { qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; } - return 0; +out: + return err; } static int mpt_mask(struct mlx4_dev *dev) @@ -2944,8 +2943,12 @@ static int verify_qp_parameters(struct mlx4_dev *dev, qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; optpar = be32_to_cpu(*(__be32 *) inbox->buf); - if (slave != mlx4_master_func_num(dev)) + if (slave != mlx4_master_func_num(dev)) { qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + /* setting QP rate-limit is disallowed for VFs */ + if (qp_ctx->rate_limit_params) + return -EPERM; + } switch (qp_type) { case MLX4_QP_ST_RC: @@ -3024,7 +3027,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, /* Call the SW implementation of write_mtt: * - Prepare a dummy mtt struct - * - Translate inbox contents to simple addresses in host endianess */ + * - Translate inbox contents to simple addresses in host endianness */ mtt.offset = 0; /* TBD this is broken but I don't handle it since we don't really use it */ mtt.order = 0; @@ -3092,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) if (!priv->mfunc.master.slave_state) return -EINVAL; + /* check for slave valid, slave not PF, and slave active */ + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) + return 0; + event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; /* Create the event only if the slave is registered */ @@ -4909,6 +4918,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) qp->sched_queue & 0xC7; upd_context->qp_context.pri_path.sched_queue |= ((work->qos & 0x7) << 3); + upd_context->qp_mask |= + cpu_to_be64(1ULL << + MLX4_UPD_QP_MASK_QOS_VPP); + upd_context->qp_context.qos_vport = + work->qos_vport; } err = mlx4_cmd(dev, mailbox->dma, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 201ca6d76ce5..ac0f7bf4be95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -171,6 +171,9 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); db->dma = pgdir->db_dma + offset; + db->db[0] = 0; + db->db[1] = 0; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a2853057c779..e3273faf4568 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -125,7 +125,10 @@ static u8 alloc_token(struct mlx5_cmd *cmd) u8 token; spin_lock(&cmd->token_lock); - token = cmd->token++ % 255 + 1; + cmd->token++; + if (cmd->token == 0) + cmd->token++; + token = cmd->token; spin_unlock(&cmd->token_lock); return token; @@ -515,10 +518,11 @@ static void cmd_work_handler(struct work_struct *work) ent->ts1 = ktime_get_ns(); /* ring doorbell after the descriptor is valid */ + mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); - mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx); mmiowb(); + /* if not in polling don't use ent after this point */ if (cmd->mode == CMD_MODE_POLLING) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ @@ -1236,7 +1240,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, goto out_out; } - err = mlx5_copy_from_msg(out, outb, out_size); + if (!callback) + err = mlx5_copy_from_msg(out, outb, out_size); out_out: if (!callback) @@ -1319,6 +1324,45 @@ ex_err: return err; } +static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + /* make sure it is aligned to 4K */ + if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { + cmd->cmd_buf = cmd->cmd_alloc_buf; + cmd->dma = cmd->alloc_dma; + cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; + return 0; + } + + dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, + cmd->alloc_dma); + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); + cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); + cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; + return 0; +} + +static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, + cmd->alloc_dma); +} + int mlx5_cmd_init(struct mlx5_core_dev *dev) { int size = sizeof(struct mlx5_cmd_prot_block); @@ -1341,17 +1385,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) if (!cmd->pool) return -ENOMEM; - cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); - if (!cmd->cmd_buf) { - err = -ENOMEM; + err = alloc_cmd_page(dev, cmd); + if (err) goto err_free_pool; - } - cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE, - 
DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) { - err = -ENOMEM; - goto err_free; - } cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; cmd->log_sz = cmd_l >> 4 & 0xf; @@ -1360,13 +1396,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); err = -EINVAL; - goto err_map; + goto err_free_page; } if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { dev_err(&dev->pdev->dev, "command queue size overflow\n"); err = -EINVAL; - goto err_map; + goto err_free_page; } cmd->checksum_disabled = 1; @@ -1378,7 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); err = -ENOTSUPP; - goto err_map; + goto err_free_page; } spin_lock_init(&cmd->alloc_lock); @@ -1394,7 +1430,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) if (cmd_l & 0xfff) { dev_err(&dev->pdev->dev, "invalid command queue address\n"); err = -ENOMEM; - goto err_map; + goto err_free_page; } iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); @@ -1410,7 +1446,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) err = create_msg_cache(dev); if (err) { dev_err(&dev->pdev->dev, "failed to create command cache\n"); - goto err_map; + goto err_free_page; } set_wqname(dev); @@ -1435,11 +1471,8 @@ err_wq: err_cache: destroy_msg_cache(dev); -err_map: - dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, - DMA_BIDIRECTIONAL); -err_free: - free_pages((unsigned long)cmd->cmd_buf, 0); +err_free_page: + free_cmd_page(dev, cmd); err_free_pool: pci_pool_destroy(cmd->pool); @@ -1455,9 +1488,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) clean_debug_files(dev); destroy_workqueue(cmd->wq); destroy_msg_cache(dev); - dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, - DMA_BIDIRECTIONAL); - free_pages((unsigned long)cmd->cmd_buf, 0); + free_cmd_page(dev, cmd); pci_pool_destroy(cmd->pool); } EXPORT_SYMBOL(mlx5_cmd_cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 43c5f4809526..eb0cf81f5f45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 4878025e231c..5210d92e6bc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index da82991239a8..58800e4f3958 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -208,7 +208,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) * Make sure we read EQ entry contents after we've * checked the ownership bit. */ - rmb(); + dma_rmb(); mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 06f9036acd83..4b4cda3bcc5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 3e6670c4a7cd..292d76f2a904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c index fd80ecfa7195..ee1b0b965f34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5394a8486558..28425e5ea91f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -48,11 +48,11 @@ #include "mlx5_core.h" #define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "2.2-1" -#define DRIVER_RELDATE "Feb 2014" +#define DRIVER_VERSION "3.0" +#define DRIVER_RELDATE "January 2015" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); -MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); +MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); @@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from) MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); - MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc); - MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc); MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; @@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) return 0; } +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + int err = -ENOENT; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(mlx5_vector2eqn); + +static void free_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(dev, eq)) + mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", + eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + char name[MLX5_MAX_EQ_NAME]; + struct mlx5_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + err = mlx5_create_map_eq(dev, eq, + i + MLX5_EQ_VEC_COMP_BASE, nent, 0, + name, &dev->priv.uuari.uars[0]); + if (err) { + kfree(eq); + goto clean; + } + mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_free_uar; } + err = alloc_comp_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); + goto err_stop_eqs; + } + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); mlx5_init_cq_table(dev); @@ -654,6 +739,9 @@ static int 
mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; +err_stop_eqs: + mlx5_stop_eqs(dev); + err_free_uar: mlx5_free_uuars(dev, &priv->uuari); @@ -697,7 +785,6 @@ err_dbg: debugfs_remove(priv->dbg_root); return err; } -EXPORT_SYMBOL(mlx5_dev_init); static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) { @@ -706,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); + free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); mlx5_eq_cleanup(dev); @@ -820,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_priv *priv = &mdev->priv; + struct mlx5_device_context *dev_ctx; + unsigned long flags; + void *result = NULL; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) + if ((dev_ctx->intf->protocol == protocol) && + dev_ctx->intf->get_dev) { + result = dev_ctx->intf->get_dev(dev_ctx->context); + break; + } + + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + return result; +} +EXPORT_SYMBOL(mlx5_get_protocol_dev); + static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index 44837640bd7c..d79fd85d1dd5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index f0c9f9a7a361..a051b906afdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 184c3615f479..1adb300dd850 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -141,7 +141,7 @@ EXPORT_SYMBOL(mlx5_core_destroy_mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_query_mkey_mbox_out *out, int outlen) { - struct mlx5_destroy_mkey_mbox_in in; + struct mlx5_query_mkey_mbox_in in; int err; memset(&in, 0, sizeof(in)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4fdaae9b54d9..8a64542abc16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -211,26 +211,28 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) return 0; } +#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) + static void free_4k(struct mlx5_core_dev *dev, u64 addr) { struct fw_page *fwp; int n; - fwp = find_fw_page(dev, addr & PAGE_MASK); + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); if (!fwp) { mlx5_core_warn(dev, "page not found\n"); return; } - n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; + n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; fwp->free_count++; set_bit(n, &fwp->bitmask); if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count != 1) list_del(&fwp->list); - dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); } else if (fwp->free_count == 1) { @@ -243,8 +245,9 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) struct page *page; u64 addr; int err; + int nid = dev_to_node(&dev->pdev->dev); - page = alloc_page(GFP_HIGHUSER); + page = alloc_pages_node(nid, GFP_HIGHUSER, 0); if (!page) { mlx5_core_warn(dev, "failed to allocate page\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c index 790da5c4ca4f..f2d3aee909e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 72c2d002c3b8..49e90f2612d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 575d853dbe05..dc7dbf7e9d98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 38bce93f8314..f9d25dcd03c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 06801d6f595e..5a89bb1d678a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 10988fbf47eb..6f332ebdf3b5 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) for (i = 0; i < hw->addr_list_size; i++) { if (ether_addr_equal(hw->address[i], mac_addr)) { - memset(hw->address[i], 0, ETH_ALEN); + eth_zero_addr(hw->address[i]); writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI); return 0; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 6c72e74fef3e..81d0f1c86d6d 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -150,7 +150,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->rx_head = 0; - /* reset the MAC controler TX/RX desciptor base address */ + /* reset the MAC controller TX/RX descriptor base address */ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index a4cdf2f8041a..1e0f72b65459 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -1343,7 +1343,7 @@ static int init_nic(struct s2io_nic *nic) TX_PA_CFG_IGNORE_L2_ERR; writeq(val64, &bar0->tx_pa_cfg); - /* Rx DMA intialization. */ + /* Rx DMA initialization. 
*/ val64 = 0; for (i = 0; i < config->rx_ring_num; i++) { struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; @@ -2520,7 +2520,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n", ring->dev->name); if (first_rxdp) { - wmb(); + dma_wmb(); first_rxdp->Control_1 |= RXD_OWN_XENA; } swstats->mem_alloc_fail_cnt++; @@ -2634,7 +2634,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, rxdp->Control_2 |= SET_RXD_MARKER; if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { if (first_rxdp) { - wmb(); + dma_wmb(); first_rxdp->Control_1 |= RXD_OWN_XENA; } first_rxdp = rxdp; @@ -2649,7 +2649,7 @@ end: * and other fields are seen by adapter correctly. */ if (first_rxdp) { - wmb(); + dma_wmb(); first_rxdp->Control_1 |= RXD_OWN_XENA; } @@ -6950,7 +6950,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) } set_rxd_buffer_size(sp, rxdp, size); - wmb(); + dma_wmb(); /* flip the Ownership bit to Hardware */ rxdp->Control_1 |= RXD_OWN_XENA; } diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index b07d552a27d4..be916eb2f2e7 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -18,6 +18,25 @@ #include "vxge-ethtool.h" +static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { + {"\n DRIVER STATISTICS"}, + {"vpaths_opened"}, + {"vpath_open_fail_cnt"}, + {"link_up_cnt"}, + {"link_down_cnt"}, + {"tx_frms"}, + {"tx_errors"}, + {"tx_bytes"}, + {"txd_not_free"}, + {"txd_out_of_desc"}, + {"rx_frms"}, + {"rx_errors"}, + {"rx_bytes"}, + {"rx_mcast"}, + {"pci_map_fail_cnt"}, + {"skb_alloc_fail_cnt"} +}; + /** * vxge_ethtool_sset - Sets different link parameters. * @dev: device pointer. diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h index 6cf3044d7f43..065a2c0429a4 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h @@ -19,25 +19,6 @@ /* Ethtool related variables and Macros. 
*/ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset); -static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { - {"\n DRIVER STATISTICS"}, - {"vpaths_opened"}, - {"vpath_open_fail_cnt"}, - {"link_up_cnt"}, - {"link_down_cnt"}, - {"tx_frms"}, - {"tx_errors"}, - {"tx_bytes"}, - {"txd_not_free"}, - {"txd_out_of_desc"}, - {"rx_frms"}, - {"rx_errors"}, - {"rx_bytes"}, - {"rx_mcast"}, - {"pci_map_fail_cnt"}, - {"skb_alloc_fail_cnt"} -}; - #define VXGE_TITLE_LEN 5 #define VXGE_HW_VPATH_STATS_LEN 27 #define VXGE_HW_AGGR_STATS_LEN 13 diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index d36599f47af5..7bf9c028d8d7 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1557,7 +1557,7 @@ static int octeon_mgmt_remove(struct platform_device *pdev) return 0; } -static struct of_device_id octeon_mgmt_match[] = { +static const struct of_device_id octeon_mgmt_match[] = { { .compatible = "cavium,octeon-5750-mix", }, diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 4fe8ea96bd25..f6fcf7450352 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -394,7 +394,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev, } /** - * pch_gbe_set_pauseparam - Set pause paramters + * pch_gbe_set_pauseparam - Set pause parameters * @netdev: Network interface device structure * @pause: Pause parameters structure * Returns: diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 319d9d40f922..13d88a6025c8 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -350,7 +350,7 @@ V. Recent Changes incorrectly defined and corrected (as per Michel Mueller). 02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots - were available before reseting the tbusy and tx_full flags + were available before resetting the tbusy and tx_full flags (as per Michel Mueller). 03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support. 
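The s2io hunks above convert wmb() to dma_wmb() at each point where the driver hands a filled receive descriptor back to the NIC by setting RXD_OWN_XENA, matching the rmb()-to-dma_rmb() change in the mlx5 EQ poller earlier in this series. The sketch below shows only the general hand-off pattern these conversions rely on; it is not s2io code, and struct my_rx_desc, MY_DESC_OWN and my_post_rx_buffer() are invented names. The point is that dma_wmb() orders CPU stores to coherent DMA memory against each other, which is all descriptor publication needs, and is cheaper than a full wmb() on weakly ordered architectures.

	#include <linux/types.h>	/* __le32, __le64, dma_addr_t */
	#include <asm/byteorder.h>	/* cpu_to_le32(), cpu_to_le64() */
	#include <asm/barrier.h>	/* dma_wmb() */

	/* Hypothetical descriptor living in coherent DMA memory. */
	struct my_rx_desc {
		__le64 addr;
		__le32 len;
		__le32 ctrl;		/* bit 31 hands the descriptor to HW */
	};

	#define MY_DESC_OWN cpu_to_le32(1U << 31)

	static void my_post_rx_buffer(struct my_rx_desc *desc, dma_addr_t buf,
				      u32 size)
	{
		desc->addr = cpu_to_le64(buf);
		desc->len = cpu_to_le32(size);

		/* The device must observe addr/len before it observes the
		 * ownership bit.  dma_wmb() orders stores to coherent DMA
		 * memory; unlike wmb() it does not also order MMIO, so it
		 * is lighter on weakly ordered CPUs.
		 */
		dma_wmb();
		desc->ctrl |= MY_DESC_OWN;
	}

When the publication step is an MMIO doorbell rather than a bit in a shared descriptor, a stronger barrier (or the ordering writel() already implies) is still required; the hunks above relax only the descriptor-to-descriptor case.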
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 44e8d7d25547..57a6e6cd74fc 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev) if (mac->phydev) phy_start(mac->phydev); - init_timer(&mac->tx->clean_timer); - mac->tx->clean_timer.function = pasemi_mac_tx_timer; - mac->tx->clean_timer.data = (unsigned long)mac->tx; - mac->tx->clean_timer.expires = jiffies+HZ; - add_timer(&mac->tx->clean_timer); + setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, + (unsigned long)mac->tx); + mod_timer(&mac->tx->clean_timer, jiffies + HZ); return 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 6e426ae94692..0a5e204a0179 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -354,7 +354,7 @@ struct cmd_desc_type0 { } __attribute__ ((aligned(64))); -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ +/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ struct rcv_desc { __le16 reference_handle; __le16 reserved; @@ -499,7 +499,7 @@ struct uni_data_desc{ #define NETXEN_IMAGE_START 0x43000 /* compressed image */ #define NETXEN_SECONDARY_START 0x200000 /* backup images */ #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ -#define NETXEN_USER_START 0x3E8000 /* Firmare info */ +#define NETXEN_USER_START 0x3E8000 /* Firmware info */ #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 716fc37ada5a..db80eb1c6d4f 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) u8 null_addr[ETH_ALEN]; int i; - memset(null_addr, 0, ETH_ALEN); + eth_zero_addr(null_addr); if (netdev->flags & IFF_PROMISC) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fa4317611fd6..f221126a5c4e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -314,7 +314,7 @@ struct qlcnic_fdt { #define QLCNIC_BRDCFG_START 0x4000 /* board config */ #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ -#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ +#define QLCNIC_USER_START 0x3E8000 /* Firmware info */ #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f3346a3779d3..69f828eb42cf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -205,7 +205,7 @@ struct qlcnic_add_rings_mbx_out { * @phys_addr_{low|high}: DMA address of the transmit buffer * @cnsmr_index_{low|high}: host consumer index * @size: legth of transmit buffer ring - * @intr_id: interrput id + * @intr_id: interrupt id * @src: src of interrupt */ struct qlcnic_tx_mbx { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 
2bb48d57e7a5..33669c29b341 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -269,7 +269,7 @@ static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, } QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); - /* Clear gracefull reset bit */ + /* Clear graceful reset bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val &= ~QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); @@ -889,7 +889,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) * @adapter: adapter structure * * Device will remain in this state until: - * Reset request ACK's are recieved from all the functions + * Reset request ACK's are received from all the functions * Wait time exceeds max time limit * * Returns: Error code or Success(0) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 3e0f705a4311..75ee9e4ced51 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -818,7 +818,7 @@ int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter, if (rv) netdev_err(adapter->netdev, - "Failed to set Rx coalescing parametrs\n"); + "Failed to set Rx coalescing parameters\n"); return rv; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index a430a34a4434..367f3976df56 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -507,6 +507,7 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { + features = vlan_features_check(skb, features); return vxlan_features_check(skb, features); } #endif diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8011ef3e7707..25800a1dedcb 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set) netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Set Mac addr %pM\n", addr); } else { - memset(zero_mac_addr, 0, ETH_ALEN); + eth_zero_addr(zero_mac_addr); addr = &zero_mac_addr[0]; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Clearing MAC address\n"); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 2c811f66d5ac..f66641d961e3 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -41,7 +41,6 @@ #include <linux/skbuff.h> #include <linux/spi/spi.h> #include <linux/types.h> -#include <linux/version.h> #include "qca_7k.h" #include "qca_debug.h" @@ -571,7 +570,7 @@ qcaspi_spi_thread(void *data) } /* can only handle other interrupts - * if sync has occured + * if sync has occurred */ if (qca->sync == QCASPI_SYNC_READY) { if (intr_cause & SPI_INT_PKT_AVLBL) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ad0020af2193..c70ab40d8698 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) int rc = -EINVAL; if (!rtl_fw_format_ok(tp, rtl_fw)) { - netif_err(tp, ifup, dev, "invalid firwmare\n"); + netif_err(tp, ifup, dev, "invalid firmware\n"); goto out; } @@ -5067,8 +5067,6 @@ 
static void rtl_hw_reset(struct rtl8169_private *tp) RTL_W8(ChipCmd, CmdReset); rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); - - netdev_reset_queue(tp->dev); } static void rtl_request_uncached_firmware(struct rtl8169_private *tp) @@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 status, len; u32 opts[2]; int frags; - bool stop_queue; if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); @@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); + RTL_W8(TxPoll, NPQ); - if (!skb->xmit_more || stop_queue || - netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) { - RTL_W8(TxPoll, NPQ); - - mmiowb(); - } + mmiowb(); - if (stop_queue) { + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ @@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { unsigned int dirty_tx, tx_left; - unsigned int bytes_compl = 0, pkts_compl = 0; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - pkts_compl++; - bytes_compl += tx_skb->skb->len; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } @@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) } if (tp->dirty_tx != dirty_tx) { - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); - - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets += pkts_compl; - tp->tx_stats.bytes += bytes_compl; - u64_stats_update_end(&tp->tx_stats.syncp); - tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4da8bd263997..7fb244f565b2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -52,7 +52,12 @@ NETIF_MSG_RX_ERR| \ NETIF_MSG_TX_ERR) +#define SH_ETH_OFFSET_DEFAULTS \ + [0 ... 
SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID + static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDSR] = 0x0000, [EDMR] = 0x0400, [EDTRR] = 0x0408, @@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_POST3] = 0x0078, [TSU_POST4] = 0x007c, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRH31] = 0x01f8, - [TSU_ADRL31] = 0x01fc, [TXNLCR0] = 0x0080, [TXALCR0] = 0x0084, @@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDSR] = 0x0000, [EDMR] = 0x0400, [EDTRR] = 0x0408, @@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADSBSY] = 0x0060, [TSU_TEN] = 0x0064, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRH31] = 0x01f8, - [TSU_ADRL31] = 0x01fc, [TXNLCR0] = 0x0080, [TXALCR0] = 0x0084, @@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [ECMR] = 0x0300, [RFLR] = 0x0308, [ECSR] = 0x0310, @@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [ECMR] = 0x0100, [RFLR] = 0x0108, [ECSR] = 0x0110, @@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { }; static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + [EDMR] = 0x0000, [EDTRR] = 0x0004, [EDRRR] = 0x0008, @@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [FWALCR1] = 0x00b4, [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRL31] = 0x01fc, }; static void sh_eth_rcv_snd_disable(struct net_device *ndev); @@ -508,7 +513,6 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, - .shift_rd0 = 1, }; static void sh_eth_set_rate_sh7724(struct net_device *ndev) @@ -589,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = { .no_ade = 1, .rpadir = 1, .rpadir_value = 2 << 16, + .rtrate = 1, }; #define SH_GIGA_ETH_BASE 0xfee00000UL @@ -1392,6 +1397,9 @@ static void sh_eth_dev_exit(struct net_device *ndev) msleep(2); /* max frame time at 10 Mbps < 1250 us */ sh_eth_get_stats(ndev); sh_eth_reset(ndev); + + /* Set MAC address again */ + update_mac_address(ndev); } /* free Tx skb function */ @@ -1407,6 +1415,11 @@ static int sh_eth_txfree(struct net_device *ndev) txdesc = &mdp->tx_ring[entry]; if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) break; + /* TACT bit must be checked before all the following reads */ + rmb(); + netif_info(mdp, tx_done, ndev, + "tx entry %d status 0x%08x\n", + entry, edmac_to_cpu(mdp, txdesc->status)); /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { dma_unmap_single(&ndev->dev, txdesc->addr, @@ -1444,19 +1457,25 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + rmb(); desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; if (--boguscnt < 0) break; + netif_info(mdp, rx_status, ndev, + "rx entry %d status 0x%08x len %d\n", + entry, desc_status, pkt_len); + if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; /* In case of almost all GETHER/ETHERs, the Receive Frame State * (RFS) bits in the Receive Descriptor 0 are from bit 9 to - * bit 0. However, in case of the R8A7740, R8A779x, and - * R7S72100 the RFS bits are from bit 25 to bit 16. So, the + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. */ if (mdp->cd->shift_rd0) @@ -1494,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pkt_len; + if (desc_status & RD_RFS8) + ndev->stats.multicast++; } entry = (++mdp->cur_rx) % mdp->num_rx_ring; rxdesc = &mdp->rx_ring[entry]; @@ -1523,6 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) skb_checksum_none_assert(skb); rxdesc->addr = dma_addr; } + wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); @@ -1535,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* If we don't need to check status, don't. -KDU */ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { /* fix the values for the next receiving if RDE is set */ - if (intr_status & EESR_RDE) { + if (intr_status & EESR_RDE && + mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { u32 count = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4; @@ -1922,6 +1945,192 @@ error_exit: return ret; } +/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the + * version must be bumped as well. Just adding registers up to that + * limit is fine, as long as the existing register indices don't + * change. + */ +#define SH_ETH_REG_DUMP_VERSION 1 +#define SH_ETH_REG_DUMP_MAX_REGS 256 + +static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + u32 *valid_map; + size_t len; + + BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS); + + /* Dump starts with a bitmap that tells ethtool which + * registers are defined for this chip. + */ + len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32); + if (buf) { + valid_map = buf; + buf += len; + } else { + valid_map = NULL; + } + + /* Add a register to the dump, if it has a defined offset. + * This automatically skips most undefined registers, but for + * some it is also necessary to check a capability flag in + * struct sh_eth_cpu_data. 
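+ *
+ * (Editorial sketch, not part of this patch) a consumer of the dump
+ * can test the same encoding that mark_reg_valid() below produces:
+ *
+ *	static bool reg_is_valid(const u32 *valid_map, unsigned int reg)
+ *	{
+ *		return valid_map[reg / 32] & (1U << (reg % 32));
+ *	}
+ *
+ * reg_is_valid() is a hypothetical helper, shown for illustration only.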
+ */ +#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32) +#define add_reg_from(reg, read_expr) do { \ + if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ + if (buf) { \ + mark_reg_valid(reg); \ + *buf++ = read_expr; \ + } \ + ++len; \ + } \ + } while (0) +#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg)) +#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) + + add_reg(EDSR); + add_reg(EDMR); + add_reg(EDTRR); + add_reg(EDRRR); + add_reg(EESR); + add_reg(EESIPR); + add_reg(TDLAR); + add_reg(TDFAR); + add_reg(TDFXR); + add_reg(TDFFR); + add_reg(RDLAR); + add_reg(RDFAR); + add_reg(RDFXR); + add_reg(RDFFR); + add_reg(TRSCER); + add_reg(RMFCR); + add_reg(TFTR); + add_reg(FDR); + add_reg(RMCR); + add_reg(TFUCR); + add_reg(RFOCR); + if (cd->rmiimode) + add_reg(RMIIMODE); + add_reg(FCFTR); + if (cd->rpadir) + add_reg(RPADIR); + if (!cd->no_trimd) + add_reg(TRIMD); + add_reg(ECMR); + add_reg(ECSR); + add_reg(ECSIPR); + add_reg(PIR); + if (!cd->no_psr) + add_reg(PSR); + add_reg(RDMLR); + add_reg(RFLR); + add_reg(IPGR); + if (cd->apr) + add_reg(APR); + if (cd->mpr) + add_reg(MPR); + add_reg(RFCR); + add_reg(RFCF); + if (cd->tpauser) + add_reg(TPAUSER); + add_reg(TPAUSECR); + add_reg(GECMR); + if (cd->bculr) + add_reg(BCULR); + add_reg(MAHR); + add_reg(MALR); + add_reg(TROCR); + add_reg(CDCR); + add_reg(LCCR); + add_reg(CNDCR); + add_reg(CEFCR); + add_reg(FRECR); + add_reg(TSFRCR); + add_reg(TLFRCR); + add_reg(CERCR); + add_reg(CEECR); + add_reg(MAFCR); + if (cd->rtrate) + add_reg(RTRATE); + if (cd->hw_crc) + add_reg(CSMR); + if (cd->select_mii) + add_reg(RMII_MII); + add_reg(ARSTR); + if (cd->tsu) { + add_tsu_reg(TSU_CTRST); + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + add_tsu_reg(TSU_FWSLC); + add_tsu_reg(TSU_QTAG0); + add_tsu_reg(TSU_QTAG1); + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + add_tsu_reg(TSU_ADSBSY); + add_tsu_reg(TSU_TEN); + add_tsu_reg(TSU_POST1); + add_tsu_reg(TSU_POST2); + add_tsu_reg(TSU_POST3); + add_tsu_reg(TSU_POST4); + if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { + /* This is the start of a table, not just a single + * register. 
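+ *
+ * (Editorial sketch) with the ADRH/ADRL pairs laid out back to back,
+ * CAM entry i amounts to two consecutive 32-bit reads:
+ *
+ *	addrh = ioread32(tsu_addr + reg_offset[TSU_ADRH0] + i * 8);
+ *	addrl = ioread32(tsu_addr + reg_offset[TSU_ADRH0] + i * 8 + 4);
+ *
+ * which the loop below performs with a single running word index.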
+ */ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32( + mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); + } + len += SH_ETH_TSU_CAM_ENTRIES * 2; + } + } + +#undef mark_reg_valid +#undef add_reg_from +#undef add_reg +#undef add_tsu_reg + + return len * 4; +} + +static int sh_eth_get_regs_len(struct net_device *ndev) +{ + return __sh_eth_get_regs(ndev, NULL); +} + +static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + regs->version = SH_ETH_REG_DUMP_VERSION; + + pm_runtime_get_sync(&mdp->pdev->dev); + __sh_eth_get_regs(ndev, buf); + pm_runtime_put_sync(&mdp->pdev->dev); +} + static int sh_eth_nway_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2067,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev, static const struct ethtool_ops sh_eth_ethtool_ops = { .get_settings = sh_eth_get_settings, .set_settings = sh_eth_set_settings, + .get_regs_len = sh_eth_get_regs_len, + .get_regs = sh_eth_get_regs, .nway_reset = sh_eth_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, @@ -2174,7 +2385,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } spin_unlock_irqrestore(&mdp->lock, flags); - if (skb_padto(skb, ETH_ZLEN)) + if (skb_put_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; entry = mdp->cur_tx % mdp->num_tx_ring; @@ -2192,6 +2403,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } txdesc->buffer_length = skb->len; + wmb(); /* TACT bit must be set after all the above writes */ if (entry >= mdp->num_tx_ring - 1) txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); else @@ -2205,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +/* The statistics registers have write-clear behaviour, which means we + * will lose any increment between the read and write. We mitigate + * this by only clearing when we read a non-zero value, so we will + * never falsely report a total of zero. 
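+ *
+ * (Editorial sketch, not part of this patch) the window with a plain
+ * read-then-unconditional-clear:
+ *
+ *	delta = sh_eth_read(ndev, reg);    counter holds N
+ *	                                   hardware bumps it to N + 1
+ *	sh_eth_write(ndev, 0, reg);        the extra event is lost
+ *
+ * Clearing only when delta is non-zero does not close that window,
+ * but a counter never observed non-zero is also never cleared, so
+ * the accumulated total can never be a false zero.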
+ */ +static void +sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg) +{ + u32 delta = sh_eth_read(ndev, reg); + + if (delta) { + *stat += delta; + sh_eth_write(ndev, 0, reg); + } +} + static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2215,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) if (!mdp->is_opened) return &ndev->stats; - ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); - sh_eth_write(ndev, 0, TROCR); /* (write clear) */ - ndev->stats.collisions += sh_eth_read(ndev, CDCR); - sh_eth_write(ndev, 0, CDCR); /* (write clear) */ - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); - sh_eth_write(ndev, 0, LCCR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); + sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); if (sh_eth_is_gether(mdp)) { - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); - sh_eth_write(ndev, 0, CERCR); /* (write clear) */ - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); - sh_eth_write(ndev, 0, CEECR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CERCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CEECR); } else { - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); - sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CNDCR); } return &ndev->stats; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 259d03f353e1..06dbbe5201cb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -32,6 +32,10 @@ #define SH_ETH_TSU_CAM_ENTRIES 32 enum { + /* IMPORTANT: To keep ethtool register dump working, add new + * register names immediately before SH_ETH_MAX_REGISTER_OFFSET. 
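+ *
+ * (Editorial sketch with hypothetical names) appending keeps old
+ * indices stable, inserting does not:
+ *
+ *	enum { REG_A, REG_B, REG_NEW, MAX };	REG_A/REG_B keep 0/1
+ *	enum { REG_A, REG_NEW, REG_B, MAX };	REG_B silently becomes 2
+ *
+ * Since the ethtool dump is indexed by these enum values, the second
+ * form would change the meaning of existing dumps without the version
+ * bump that SH_ETH_REG_DUMP_VERSION is meant to signal.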
+ */ + /* E-DMAC registers */ EDSR = 0, EDMR, @@ -131,9 +135,7 @@ enum { TSU_POST3, TSU_POST4, TSU_ADRH0, - TSU_ADRL0, - TSU_ADRH31, - TSU_ADRL31, + /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */ TXNLCR0, TXALCR0, @@ -491,6 +493,7 @@ struct sh_eth_cpu_data { unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ + unsigned rtrate:1; /* EtherC has RTRATE register */ }; struct sh_eth_private { @@ -543,19 +546,29 @@ static inline void sh_eth_soft_swap(char *src, int len) #endif } +#define SH_ETH_OFFSET_INVALID ((u16) ~0) + static inline void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; - iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]); + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); } static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; - return ioread32(mdp->addr + mdp->reg_offset[enum_index]); + return ioread32(mdp->addr + offset); } static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 34389b6aa67c..a570a60533be 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -30,8 +30,12 @@ #include <linux/if_vlan.h> #include <linux/if_bridge.h> #include <linux/bitops.h> +#include <linux/ctype.h> #include <net/switchdev.h> #include <net/rtnetlink.h> +#include <net/ip_fib.h> +#include <net/netevent.h> +#include <net/arp.h> #include <asm-generic/io-64-nonatomic-lo-hi.h> #include <generated/utsrelease.h> @@ -49,12 +53,12 @@ struct rocker_flow_tbl_key { enum rocker_of_dpa_table_id tbl_id; union { struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; enum rocker_of_dpa_table_id goto_tbl; } ig_port; struct { - u32 in_lport; + u32 in_pport; __be16 vlan_id; __be16 vlan_id_mask; enum rocker_of_dpa_table_id goto_tbl; @@ -62,8 +66,8 @@ struct rocker_flow_tbl_key { __be16 new_vlan_id; } vlan; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; __be16 eth_type; u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; @@ -91,8 +95,8 @@ struct rocker_flow_tbl_key { bool copy_to_cpu; } bridge; struct { - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; u8 eth_src[ETH_ALEN]; u8 eth_src_mask[ETH_ALEN]; u8 eth_dst[ETH_ALEN]; @@ -111,9 +115,10 @@ struct rocker_flow_tbl_key { struct rocker_flow_tbl_entry { struct hlist_node entry; - u32 ref_count; + u32 cmd; u64 cookie; struct rocker_flow_tbl_key key; + size_t key_len; u32 key_crc32; /* key */ }; @@ -148,7 +153,7 @@ struct rocker_fdb_tbl_entry { u32 key_crc32; /* key */ bool learned; struct rocker_fdb_tbl_key { - u32 lport; + u32 pport; u8 addr[ETH_ALEN]; __be16 vlan_id; } key; @@ -161,6 +166,16 @@ struct rocker_internal_vlan_tbl_entry { __be16 vlan_id; }; +struct rocker_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + struct rocker_desc_info { char *data; /* mapped */ size_t data_size; @@ -200,7 +215,7 @@ struct rocker_port { struct 
net_device *bridge_dev; struct rocker *rocker; unsigned int port_number; - u32 lport; + u32 pport; __be16 internal_vlan_id; int stp_state; u32 brport_flags; @@ -234,6 +249,9 @@ struct rocker { unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; DECLARE_HASHTABLE(internal_vlan_tbl, 8); spinlock_t internal_vlan_tbl_lock; + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; + u32 neigh_tbl_next_index; }; static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; @@ -256,7 +274,6 @@ enum { ROCKER_PRIORITY_VLAN = 1, ROCKER_PRIORITY_TERM_MAC_UCAST = 0, ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_UNICAST_ROUTING = 1, ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, ROCKER_PRIORITY_BRIDGING_VLAN = 3, @@ -789,7 +806,30 @@ static u32 __pos_inc(u32 pos, size_t limit) static int rocker_desc_err(struct rocker_desc_info *desc_info) { - return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; } static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) @@ -1257,9 +1297,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); if (enable) - val |= 1 << rocker_port->lport; + val |= 1ULL << rocker_port->pport; else - val &= ~(1 << rocker_port->lport); + val &= ~(1ULL << rocker_port->pport); rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); } @@ -1312,11 +1352,11 @@ static int rocker_event_link_change(struct rocker *rocker, struct rocker_port *rocker_port; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); if (port_number >= rocker->port_count) @@ -1353,12 +1393,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker, __be16 vlan_id; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) return -EIO; port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); @@ -1517,8 +1557,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; @@ -1591,6 +1631,53 @@ rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, return 0; } +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + struct rocker_tlv *attr; + size_t i, j, len; + char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + static int rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, struct rocker_port *rocker_port, @@ -1606,8 +1693,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, ethtool_cmd_speed(ecmd))) @@ -1637,8 +1724,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, ETH_ALEN, macaddr)) @@ -1661,8 +1748,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker, cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, !!(rocker_port->brport_flags & BR_LEARNING))) @@ -1715,11 +1802,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port) static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.ig_port.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.ig_port.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) return -EMSGSIZE; if 
(rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, entry->key.ig_port.goto_tbl)) @@ -1731,8 +1818,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.vlan.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->key.vlan.vlan_id)) @@ -1754,11 +1841,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.term_mac.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.term_mac.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, entry->key.term_mac.eth_type)) @@ -1845,11 +1932,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, struct rocker_flow_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, - entry->key.acl.in_lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, - entry->key.acl.in_lport_mask)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) return -EMSGSIZE; if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->key.acl.eth_src)) @@ -1917,8 +2004,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker, struct rocker_tlv *cmd_info; int err = 0; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD)) + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) @@ -1975,8 +2061,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker, const struct rocker_flow_tbl_entry *entry = priv; struct rocker_tlv *cmd_info; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL)) + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) @@ -1993,7 +2078,7 @@ static int rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct rocker_group_tbl_entry *entry) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, ROCKER_GROUP_PORT_GET(entry->group_id))) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, @@ -2145,9 +2230,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker, return 0; } -/***************************************** - * Flow, group, FDB, internal VLAN tables - *****************************************/ 
+/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ static int rocker_init_tbls(struct rocker *rocker) { @@ -2163,6 +2248,9 @@ static int rocker_init_tbls(struct rocker *rocker) hash_init(rocker->internal_vlan_tbl); spin_lock_init(&rocker->internal_vlan_tbl_lock); + hash_init(rocker->neigh_tbl); + spin_lock_init(&rocker->neigh_tbl_lock); + return 0; } @@ -2173,6 +2261,7 @@ static void rocker_free_tbls(struct rocker *rocker) struct rocker_group_tbl_entry *group_entry; struct rocker_fdb_tbl_entry *fdb_entry; struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct rocker_neigh_tbl_entry *neigh_entry; struct hlist_node *tmp; int bkt; @@ -2196,16 +2285,22 @@ static void rocker_free_tbls(struct rocker *rocker) tmp, internal_vlan_entry, entry) hash_del(&internal_vlan_entry->entry); spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); + hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); } static struct rocker_flow_tbl_entry * rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match) { struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); hash_for_each_possible(rocker->flow_tbl, found, entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + if (memcmp(&found->key, &match->key, key_len) == 0) return found; } @@ -2218,42 +2313,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, { struct rocker *rocker = rocker_port->rocker; struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); unsigned long flags; - bool add_to_hw = false; - int err = 0; - match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + match->key_crc32 = crc32(~0, &match->key, key_len); spin_lock_irqsave(&rocker->flow_tbl_lock, flags); found = rocker_flow_tbl_find(rocker, match); if (found) { - kfree(match); + match->cookie = found->cookie; + hash_del(&found->entry); + kfree(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; } else { found = match; found->cookie = rocker->flow_tbl_next_cookie++; - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - add_to_hw = true; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; } - found->ref_count++; + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - if (add_to_hw) { - err = rocker_cmd_exec(rocker, rocker_port, - rocker_cmd_flow_tbl_add, - found, NULL, NULL, nowait); - if (err) { - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_del(&found->entry); - spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - kfree(found); - } - } - - return err; + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_add, + found, NULL, NULL, nowait); } static int rocker_flow_tbl_del(struct rocker_port *rocker_port, @@ -2262,29 +2349,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, { struct rocker *rocker = rocker_port->rocker; struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); unsigned long flags; - bool del_from_hw = false; int err = 0; - match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + match->key_crc32 = crc32(~0, &match->key, key_len); spin_lock_irqsave(&rocker->flow_tbl_lock, flags); found = rocker_flow_tbl_find(rocker, match); if (found) { - found->ref_count--; - if (found->ref_count == 0) { - hash_del(&found->entry); - del_from_hw = true; - } + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; } spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); kfree(match); - if (del_from_hw) { + if (found) { err = rocker_cmd_exec(rocker, rocker_port, rocker_cmd_flow_tbl_del, found, NULL, NULL, nowait); @@ -2311,7 +2395,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port, } static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - int flags, u32 in_lport, u32 in_lport_mask, + int flags, u32 in_pport, u32 in_pport_mask, enum rocker_of_dpa_table_id goto_tbl) { struct rocker_flow_tbl_entry *entry; @@ -2322,15 +2406,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_IG_PORT; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_lport = in_lport; - entry->key.ig_port.in_lport_mask = in_lport_mask; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; entry->key.ig_port.goto_tbl = goto_tbl; return rocker_flow_tbl_do(rocker_port, flags, entry); } static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - int flags, u32 in_lport, + int flags, u32 in_pport, __be16 vlan_id, __be16 vlan_id_mask, enum rocker_of_dpa_table_id goto_tbl, bool untagged, __be16 new_vlan_id) @@ -2343,7 +2427,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, entry->key.priority = ROCKER_PRIORITY_VLAN; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_lport = in_lport; + entry->key.vlan.in_pport = in_pport; entry->key.vlan.vlan_id = vlan_id; entry->key.vlan.vlan_id_mask = vlan_id_mask; entry->key.vlan.goto_tbl = goto_tbl; @@ -2355,7 +2439,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, } static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - u32 in_lport, u32 in_lport_mask, + u32 in_pport, u32 in_pport_mask, __be16 eth_type, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 vlan_id, __be16 vlan_id_mask, bool copy_to_cpu, @@ -2378,8 +2462,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, } entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_lport = in_lport; - entry->key.term_mac.in_lport_mask = in_lport_mask; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; entry->key.term_mac.eth_type = eth_type; ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); @@ -2444,9 +2528,34 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, return rocker_flow_tbl_do(rocker_port, flags, entry); } +static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = 
priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct rocker_flow_tbl_key, + ucast_routing.group_id); + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - int flags, u32 in_lport, - u32 in_lport_mask, + int flags, u32 in_pport, + u32 in_pport_mask, const u8 *eth_src, const u8 *eth_src_mask, const u8 *eth_dst, const u8 *eth_dst_mask, __be16 eth_type, @@ -2472,8 +2581,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, entry->key.priority = priority; entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_lport = in_lport; - entry->key.acl.in_lport_mask = in_lport_mask; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; if (eth_src) ether_addr_copy(entry->key.acl.eth_src, eth_src); @@ -2531,7 +2640,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, struct rocker *rocker = rocker_port->rocker; struct rocker_group_tbl_entry *found; unsigned long flags; - int err = 0; spin_lock_irqsave(&rocker->group_tbl_lock, flags); @@ -2551,12 +2659,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - if (found->cmd) - err = rocker_cmd_exec(rocker, rocker_port, - rocker_cmd_group_tbl_add, - found, NULL, NULL, nowait); - - return err; + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_add, + found, NULL, NULL, nowait); } static int rocker_group_tbl_del(struct rocker_port *rocker_port, @@ -2604,7 +2709,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port, static int rocker_group_l2_interface(struct rocker_port *rocker_port, int flags, __be16 vlan_id, - u32 out_lport, int pop_vlan) + u32 out_pport, int pop_vlan) { struct rocker_group_tbl_entry *entry; @@ -2612,7 +2717,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port, if (!entry) return -ENOMEM; - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); entry->l2_interface.pop_vlan = pop_vlan; return rocker_group_tbl_do(rocker_port, flags, entry); @@ -2652,17 +2757,262 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port, group_id); } +static int rocker_group_l3_unicast(struct rocker_port *rocker_port, + int flags, u32 index, u8 *src_mac, + u8 *dst_mac, __be16 vlan_id, + bool ttl_check, u32 pport) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static struct rocker_neigh_tbl_entry * + rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr) +{ + struct rocker_neigh_tbl_entry *found; + + hash_for_each_possible(rocker->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void 
_rocker_neigh_add(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + entry->index = rocker->neigh_tbl_next_index++; + entry->ref_count++; + hash_add(rocker->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void _rocker_neigh_del(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + kfree(entry); + } +} + +static void _rocker_neigh_update(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry, + u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else { + entry->ref_count++; + } +} + +static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, + int flags, __be32 ip_addr, u8 *eth_dst) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + _rocker_neigh_add(rocker, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + _rocker_neigh_del(rocker, found); + } else if (updating) { + _rocker_neigh_update(rocker, found, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. + */ + + err = rocker_group_l3_unicast(rocker_port, flags, + entry->index, + rocker_port->dev->dev_addr, + entry->eth_dst, + rocker_port->internal_vlan_id, + entry->ttl_check, + rocker_port->pport); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) L3 unicast group index %d\n", + err, entry->index); + goto err_out; + } + + if (adding || removing) { + group_id = ROCKER_GROUP_L3_UNICAST(entry->index); + err = rocker_flow_tbl_ucast4_routing(rocker_port, + eth_type, ip_addr, + inet_make_mask(32), + priority, goto_tbl, + group_id, flags); + + if (err) + netdev_err(rocker_port->dev, + "Error (%d) /32 unicast route %pI4 group 0x%08x\n", + err, &entry->ip_addr, group_id); + } + +err_out: + if (!adding) + kfree(entry); + + return err; +} + +static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, + __be32 ip_addr) +{ + struct net_device *dev = rocker_port->dev; + struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); + int err = 0; + + if (!n) + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (!n) + return -ENOMEM; + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. 
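+ *
+ * (Editorial sketch) the two paths, in pseudo-C:
+ *
+ *	if (n->nud_state & NUD_VALID)
+ *		rocker_port_ipv4_neigh(...);	MAC known, install now
+ *	else
+ *		neigh_event_send(n, NULL);	kick ARP; the
+ *						NETEVENT_NEIGH_UPDATE
+ *						notifier added later in
+ *						this patch installs the
+ *						entry once it resolves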
+ */ + + if (n->nud_state & NUD_VALID) + err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha); + else + neigh_event_send(n, NULL); + + return err; +} + +static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags, + __be32 ip_addr, u32 *index) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + bool resolved = true; + int err = 0; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + if (found) + *index = found->index; + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + _rocker_neigh_add(rocker, entry); + *index = entry->index; + resolved = false; + } else if (removing) { + _rocker_neigh_del(rocker, found); + } else if (updating) { + _rocker_neigh_update(rocker, found, NULL, false); + resolved = !is_zero_ether_addr(found->eth_dst); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (!adding) + kfree(entry); + + if (err) + return err; + + /* Resolved means neigh ip_addr is resolved to neigh mac. */ + + if (!resolved) + err = rocker_port_ipv4_resolve(rocker_port, ip_addr); + + return err; +} + static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, int flags, __be16 vlan_id) { struct rocker_port *p; struct rocker *rocker = rocker_port->rocker; u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 group_ids[rocker->port_count]; + u32 *group_ids; u8 group_count = 0; - int err; + int err = 0; int i; + group_ids = kcalloc(rocker->port_count, sizeof(u32), + rocker_op_flags_gfp(flags)); + if (!group_ids) + return -ENOMEM; + /* Adjust the flood group for this VLAN. The flood group * references an L2 interface group for each port in this * VLAN. 
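 *
 * (Editorial sketch, not part of the patch) the hunk above also
 * replaces a variable-length array on the stack with a heap buffer;
 * the resulting shape is:
 *
 *	group_ids = kcalloc(rocker->port_count, sizeof(u32),
 *			    rocker_op_flags_gfp(flags));
 *	if (!group_ids)
 *		return -ENOMEM;
 *	(build the flood group from group_ids, then)
 *	kfree(group_ids);
 *
 * rocker_op_flags_gfp() is assumed, from its use elsewhere in this
 * file, to derive an atomic or sleeping gfp_t from the op flags.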
@@ -2674,14 +3024,13 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, - p->lport); + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); } } /* If there are no bridged ports in this VLAN, we're done */ if (group_count == 0) - return 0; + goto no_ports_in_vlan; err = rocker_group_l2_flood(rocker_port, flags, vlan_id, group_count, group_ids, @@ -2690,6 +3039,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, netdev_err(rocker_port->dev, "Error (%d) port VLAN l2 flood group\n", err); +no_ports_in_vlan: + kfree(group_ids); return err; } @@ -2700,7 +3051,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, struct rocker *rocker = rocker_port->rocker; struct rocker_port *p; bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_lport; + u32 out_pport; int ref = 0; int err; int i; @@ -2711,14 +3062,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if (rocker_port->stp_state == BR_STATE_LEARNING || rocker_port->stp_state == BR_STATE_FORWARDING) { - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -2737,9 +3088,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, if ((!adding || ref != 1) && (adding || ref != 0)) return 0; - out_lport = 0; + out_pport = 0; err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, @@ -2799,9 +3150,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport = rocker_port->lport; - u32 in_lport_mask = 0xffffffff; - u32 out_lport = 0; + u32 in_pport = rocker_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; u8 *eth_src = NULL; u8 *eth_src_mask = NULL; __be16 vlan_id_mask = htons(0xffff); @@ -2809,11 +3160,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, u8 ip_proto_mask = 0; u8 ip_tos = 0; u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); int err; err = rocker_flow_tbl_acl(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, eth_src, eth_src_mask, ctrl->eth_dst, ctrl->eth_dst_mask, ctrl->eth_type, @@ -2856,7 +3207,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, int flags, struct rocker_ctrl *ctrl, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 vlan_id_mask = htons(0xffff); int err; @@ -2864,7 +3215,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, vlan_id = rocker_port->internal_vlan_id; err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, ctrl->eth_type, ctrl->eth_dst, ctrl->eth_dst_mask, vlan_id, vlan_id_mask, ctrl->copy_to_cpu, @@ -2934,7 +3285,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, { enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_lport = 
rocker_port->lport; + u32 in_pport = rocker_port->pport; __be16 vlan_id = htons(vid); __be16 vlan_id_mask = htons(0xffff); __be16 internal_vlan_id; @@ -2978,7 +3329,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, } err = rocker_flow_tbl_vlan(rocker_port, flags, - in_lport, vlan_id, vlan_id_mask, + in_pport, vlan_id, vlan_id_mask, goto_tbl, untagged, internal_vlan_id); if (err) netdev_err(rocker_port->dev, @@ -2990,20 +3341,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) { enum rocker_of_dpa_table_id goto_tbl; - u32 in_lport; - u32 in_lport_mask; + u32 in_pport; + u32 in_pport_mask; int err; /* Normal Ethernet Frames. Matches pkts from any local physical * ports. Goto VLAN tbl. */ - in_lport = 0; - in_lport_mask = 0xffff0000; + in_pport = 0; + in_pport_mask = 0xffff0000; goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; err = rocker_flow_tbl_ig_port(rocker_port, flags, - in_lport, in_lport_mask, + in_pport, in_pport_mask, goto_tbl); if (err) netdev_err(rocker_port->dev, @@ -3047,7 +3398,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, struct rocker_fdb_learn_work *lw; enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_lport = rocker_port->lport; + u32 out_pport = rocker_port->pport; u32 tunnel_id = 0; u32 group_id = ROCKER_GROUP_NONE; bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); @@ -3055,7 +3406,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, int err; if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); if (!(flags & ROCKER_OP_FLAG_REFRESH)) { err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, @@ -3114,7 +3465,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, return -ENOMEM; fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->key.lport = rocker_port->lport; + fdb->key.pport = rocker_port->pport; ether_addr_copy(fdb->key.addr, addr); fdb->key.vlan_id = vlan_id; fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); @@ -3161,7 +3512,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port) spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (!found->learned) continue; @@ -3182,7 +3533,7 @@ err_out: static int rocker_port_router_mac(struct rocker_port *rocker_port, int flags, __be16 vlan_id) { - u32 in_lport_mask = 0xffffffff; + u32 in_pport_mask = 0xffffffff; __be16 eth_type; const u8 *dst_mac_mask = ff_mac; __be16 vlan_id_mask = htons(0xffff); @@ -3194,7 +3545,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IP); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3203,7 +3554,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, eth_type = htons(ETH_P_IPV6); err = rocker_flow_tbl_term_mac(rocker_port, - rocker_port->lport, in_lport_mask, + rocker_port->pport, in_pport_mask, eth_type, rocker_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); @@ -3214,7 +3565,7 @@ static int rocker_port_router_mac(struct 
rocker_port *rocker_port, static int rocker_port_fwding(struct rocker_port *rocker_port) { bool pop_vlan; - u32 out_lport; + u32 out_pport; __be16 vlan_id; u16 vid; int flags = ROCKER_OP_FLAG_NOWAIT; @@ -3231,19 +3582,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port) rocker_port->stp_state != BR_STATE_FORWARDING) flags |= ROCKER_OP_FLAG_REMOVE; - out_lport = rocker_port->lport; + out_pport = rocker_port->pport; for (vid = 1; vid < VLAN_N_VID; vid++) { if (!test_bit(vid, rocker_port->vlan_bitmap)) continue; vlan_id = htons(vid); pop_vlan = rocker_vlan_id_is_internal(vlan_id); err = rocker_group_l2_interface(rocker_port, flags, - vlan_id, out_lport, + vlan_id, out_pport, pop_vlan); if (err) { netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for lport %d\n", - err, out_lport); + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); return err; } } @@ -3302,6 +3653,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) return rocker_port_fwding(rocker_port); } +static int rocker_port_fwd_enable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING); +} + +static int rocker_port_fwd_disable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); +} + static struct rocker_internal_vlan_tbl_entry * rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) { @@ -3387,6 +3758,51 @@ not_found: spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); } +static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst, + int dst_len, struct fib_info *fi, u32 tb_id, + int flags) +{ + struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = rocker_port->internal_vlan_id; + u32 priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == rocker_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = rocker_port_ipv4_nh(rocker_port, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + /***************** * Net device ops *****************/ @@ -3394,8 +3810,6 @@ not_found: static int rocker_port_open(struct net_device *dev) { struct rocker_port *rocker_port = netdev_priv(dev); - u8 stp_state = rocker_port_is_bridged(rocker_port) ? 
- BR_STATE_BLOCKING : BR_STATE_FORWARDING; int err; err = rocker_port_dma_rings_init(rocker_port); @@ -3418,9 +3832,9 @@ static int rocker_port_open(struct net_device *dev) goto err_request_rx_irq; } - err = rocker_port_stp_update(rocker_port, stp_state); + err = rocker_port_fwd_enable(rocker_port); if (err) - goto err_stp_update; + goto err_fwd_enable; napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); @@ -3428,7 +3842,7 @@ static int rocker_port_open(struct net_device *dev) netif_start_queue(dev); return 0; -err_stp_update: +err_fwd_enable: free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); err_request_rx_irq: free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); @@ -3445,7 +3859,7 @@ static int rocker_port_stop(struct net_device *dev) rocker_port_set_enable(rocker_port, false); napi_disable(&rocker_port->napi_rx); napi_disable(&rocker_port->napi_tx); - rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); + rocker_port_fwd_disable(rocker_port); free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); rocker_port_dma_rings_fini(rocker_port); @@ -3702,7 +4116,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb, spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.lport != rocker_port->lport) + if (found->key.pport != rocker_port->pport) continue; if (idx < cb->args[0]) goto skip; @@ -3772,22 +4186,19 @@ static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, rocker_port->brport_flags, mask); } -static int rocker_port_switch_parent_id_get(struct net_device *dev, - struct netdev_phys_item_id *psid) +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) { struct rocker_port *rocker_port = netdev_priv(dev); - struct rocker *rocker = rocker_port->rocker; - - psid->id_len = sizeof(rocker->hw.id); - memcpy(&psid->id, &rocker->hw.id, psid->id_len); - return 0; -} + struct port_name name = { .buf = buf, .len = len }; + int err; -static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state) -{ - struct rocker_port *rocker_port = netdev_priv(dev); + err = rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name, false); - return rocker_port_stp_update(rocker_port, state); + return err ? 
-EOPNOTSUPP : 0; } static const struct net_device_ops rocker_port_netdev_ops = { @@ -3802,8 +4213,61 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_fdb_dump = rocker_port_fdb_dump, .ndo_bridge_setlink = rocker_port_bridge_setlink, .ndo_bridge_getlink = rocker_port_bridge_getlink, - .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, - .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_swdev_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + + psid->id_len = sizeof(rocker->hw.id); + memcpy(&psid->id, &rocker->hw.id, psid->id_len); + return 0; +} + +static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_port_stp_update(rocker_port, state); +} + +static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 nlflags, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = 0; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + +static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = ROCKER_OP_FLAG_REMOVE; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + +static const struct swdev_ops rocker_port_swdev_ops = { + .swdev_parent_id_get = rocker_port_swdev_parent_id_get, + .swdev_port_stp_update = rocker_port_swdev_port_stp_update, + .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add, + .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del, }; /******************** @@ -3882,8 +4346,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker, if (!cmd_stats) return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT, - rocker_port->lport)) + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_stats); @@ -3900,7 +4364,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; struct rocker_tlv *pattr; - u32 lport; + u32 pport; u64 *data = priv; int i; @@ -3912,11 +4376,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, attrs[ROCKER_TLV_CMD_INFO]); - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]) + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) return -EIO; - lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]); - if (lport != rocker_port->lport) + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) return -EIO; for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { @@ -4104,7 +4568,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port) u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); bool link_up; - link_up = link_status & (1 << rocker_port->lport); + link_up = link_status & (1 << rocker_port->pport); if (link_up) 
netif_carrier_on(rocker_port->dev); else @@ -4152,20 +4616,22 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->dev = dev; rocker_port->rocker = rocker; rocker_port->port_number = port_number; - rocker_port->lport = port_number + 1; + rocker_port->pport = port_number + 1; rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; rocker_port_dev_addr_init(rocker, rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->swdev_ops = &rocker_port_swdev_ops; netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, NAPI_POLL_WEIGHT); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_SWITCH_OFFLOAD; + dev->features |= NETIF_F_NETNS_LOCAL | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_SWITCH_OFFLOAD; err = register_netdev(dev); if (err) { @@ -4201,6 +4667,8 @@ static int rocker_probe_ports(struct rocker *rocker) alloc_size = sizeof(struct rocker_port *) * rocker->port_count; rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; for (i = 0; i < rocker->port_count; i++) { err = rocker_probe_port(rocker, i); if (err) @@ -4291,6 +4759,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; goto err_pci_resource_len_check; } @@ -4434,9 +4903,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - err = rocker_port_vlan(rocker_port, 0, 0); - - return err; + return rocker_port_vlan(rocker_port, 0, 0); } static int rocker_port_bridge_leave(struct rocker_port *rocker_port) @@ -4456,6 +4923,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); err = rocker_port_vlan(rocker_port, 0, 0); + if (err) + return err; + + if (rocker_port->dev->flags & IFF_UP) + err = rocker_port_fwd_enable(rocker_port); return err; } @@ -4466,10 +4938,16 @@ static int rocker_port_master_changed(struct net_device *dev) struct net_device *master = netdev_master_upper_dev_get(dev); int err = 0; + /* There are currently three cases handled here: + * 1. Joining a bridge + * 2. Leaving a previously joined bridge + * 3. Other, e.g. being added to or removed from a bond or openvswitch, + * in which case nothing is done + */ if (master && master->rtnl_link_ops && !strcmp(master->rtnl_link_ops->kind, "bridge")) err = rocker_port_bridge_join(rocker_port, master); - else + else if (rocker_port_is_bridged(rocker_port)) err = rocker_port_bridge_leave(rocker_port); return err; @@ -4501,6 +4979,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = { .notifier_call = rocker_netdevice_event, }; +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = (n->nud_state & NUD_VALID) ? 
0 : ROCKER_OP_FLAG_REMOVE; + __be32 ip_addr = *(__be32 *)n->primary_key; + + return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha); +} + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_neigh_update(dev, n); + if (err) + netdev_warn(dev, + "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + /*********************** * Module init and exit ***********************/ @@ -4510,18 +5030,21 @@ static int __init rocker_module_init(void) int err; register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); err = pci_register_driver(&rocker_pci_driver); if (err) goto err_pci_register_driver; return 0; err_pci_register_driver: + unregister_netdevice_notifier(&rocker_netevent_nb); unregister_netdevice_notifier(&rocker_netdevice_nb); return err; } static void __exit rocker_module_exit(void) { + unregister_netevent_notifier(&rocker_netevent_nb); unregister_netdevice_notifier(&rocker_netdevice_nb); pci_unregister_driver(&rocker_pci_driver); } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index a5bc432feada..a4e9591d7457 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -14,6 +14,21 @@ #include <linux/types.h> +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + #define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 @@ -136,13 +151,14 @@ enum { enum { ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, ROCKER_TLV_CMD_PORT_SETTINGS_MAX = @@ -151,7 +167,7 @@ enum { enum { ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ @@ -191,7 +207,7 @@ enum { enum { ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, @@ -201,7 +217,7 @@ enum { enum { ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ @@ -275,9 +291,9 @@ enum { ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - 
ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ - ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ @@ -291,7 +307,7 @@ enum { ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c8a01ee4d25e..413ea14ab91f 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -422,11 +422,11 @@ static int init_tx_ring(struct device *dev, u8 queue_no, /* assign queue number */ tx_ring->queue_no = queue_no; - /* initalise counters */ + /* initialise counters */ tx_ring->dirty_tx = 0; tx_ring->cur_tx = 0; - /* initalise TX queue lock */ + /* initialise TX queue lock */ spin_lock_init(&tx_ring->tx_lock); return 0; @@ -515,7 +515,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, goto err_free_rx_buffers; } - /* initalise counters */ + /* initialise counters */ rx_ring->cur_rx = 0; rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); priv->dma_buf_sz = bfsize; @@ -837,7 +837,7 @@ static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) /* free the skbuffs of the ring */ tx_free_ring_skbufs(tx_ring); - /* initalise counters */ + /* initialise counters */ tx_ring->cur_tx = 0; tx_ring->dirty_tx = 0; @@ -1176,7 +1176,7 @@ static int sxgbe_open(struct net_device *dev) if (priv->phydev) phy_start(priv->phydev); - /* initalise TX coalesce parameters */ + /* initialise TX coalesce parameters */ sxgbe_tx_init_coalesce(priv); if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { @@ -1721,7 +1721,7 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) * Description: * This function is a driver entry point whenever ifconfig command gets * executed to see device statistics. Statistics are number of - * bytes sent or received, errors occured etc. + * bytes sent or received, errors occurred etc. * Return value: * This function returns various statistical information of device. */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 238482495e81..4b00545a3ace 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2523,9 +2523,7 @@ int efx_try_recovery(struct efx_nic *efx) * schedule a 'recover or reset', leading to this recovery handler. * Manually call the eeh failure check function. */ - struct eeh_dev *eehdev = - of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); - + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); if (eeh_dev_check_failure(eehdev)) { /* The EEH mechanisms will handle the error and reset the * device if necessary. @@ -3215,7 +3213,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, return status; } -/* Fake a successfull reset, which will be performed later in efx_io_resume. */ +/* Fake a successful reset, which will be performed later in efx_io_resume. 
*/ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) { struct efx_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 75975328e020..bb89e96a125e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -645,7 +645,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx) } /* Flush all the transmit queues, and continue flushing receive queues until - * they're all flushed. Wait for the DRAIN events to be recieved so that there + * they're all flushed. Wait for the DRAIN events to be received so that there * are no more RX and TX events left on any channel. */ static int efx_farch_do_flush(struct efx_nic *efx) { @@ -1108,7 +1108,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) } /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush - * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add * the RX queue back to the mask of RX queues in need of flushing. */ static void diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index a707fb5ef14c..e028de10e1b7 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -6497,7 +6497,7 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */ +/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 6b861e3de4b0..a2e9aee05cdd 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -323,9 +323,9 @@ struct efx_ptp_data { static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); -static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); static int efx_phc_settime(struct ptp_clock_info *ptp, - const struct timespec *e_ts); + const struct timespec64 *e_ts); static int efx_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); @@ -1198,8 +1198,8 @@ static const struct ptp_clock_info efx_phc_clock_info = { .pps = 1, .adjfreq = efx_phc_adjfreq, .adjtime = efx_phc_adjtime, - .gettime = efx_phc_gettime, - .settime = efx_phc_settime, + .gettime64 = efx_phc_gettime, + .settime64 = efx_phc_settime, .enable = efx_phc_enable, }; @@ -1837,7 +1837,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) NULL, 0, NULL); } -static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, @@ -1859,28 +1859,28 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) kt = ptp_data->nic_to_kernel_time( MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); - *ts = ktime_to_timespec(kt); + *ts = ktime_to_timespec64(kt); return 0; } static int efx_phc_settime(struct 
ptp_clock_info *ptp, - const struct timespec *e_ts) + const struct timespec64 *e_ts) { /* Get the current NIC time, efx_phc_gettime. * Subtract from the desired time to get the offset * call efx_phc_adjtime with the offset */ int rc; - struct timespec time_now; - struct timespec delta; + struct timespec64 time_now; + struct timespec64 delta; rc = efx_phc_gettime(ptp, &time_now); if (rc != 0) return rc; - delta = timespec_sub(*e_ts, time_now); + delta = timespec64_sub(*e_ts, time_now); - rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); + rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta)); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 10b6173d557d..b605dfd5c7bc 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -46,7 +46,7 @@ struct efx_loopback_payload { struct iphdr ip; struct udphdr udp; __be16 iteration; - const char msg[64]; + char msg[64]; } __packed; /* Loopback test source MAC address */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 3583f0208a6e..f12c811938d2 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -205,8 +205,7 @@ static int siena_map_reset_flags(u32 *flags) */ static void siena_monitor(struct efx_nic *efx) { - struct eeh_dev *eehdev = - of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); eeh_dev_check_failure(eehdev); } diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index a8bbbad68a88..fe83430796fd 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1067,7 +1067,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx) } /* Copy the list of individual addresses into the vfdi_status.peers - * array and auxillary pages, protected by %local_lock. Drop that lock + * array and auxiliary pages, protected by %local_lock. Drop that lock * and then broadcast the address list to every VF. */ static void efx_siena_sriov_peer_work(struct work_struct *data) diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h index ae044f44936a..f62901d4cae0 100644 --- a/drivers/net/ethernet/sfc/vfdi.h +++ b/drivers/net/ethernet/sfc/vfdi.h @@ -98,7 +98,7 @@ struct vfdi_endpoint { * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then * finalize the SRAM entries. - * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targetting the given RXQ. + * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates * from PF and write the initial status. @@ -148,7 +148,7 @@ enum vfdi_op { * @u.init_txq.flags: Checksum offload flags. * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA * address of each page backing the transmit queue. - * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting + * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting * all traffic at this receive queue. * @u.mac_filter.flags: MAC filter flags. * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. 
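The sfc PTP conversion above is the recurring pattern in this series (stmmac_ptp.c and ti/cpts.c below get the same treatment): the y2038-unsafe struct timespec callbacks become timespec64-based gettime64/settime64 operations, with settime expressed as the current hardware time plus an adjtime offset. A minimal sketch of that pattern, assuming hypothetical demo_gettime64()/demo_adjtime() callbacks in place of a real driver's PHC operations:

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

/* Illustrative sketch only: demo_gettime64() and demo_adjtime() are
 * hypothetical stand-ins for a driver's real .gettime64/.adjtime
 * callbacks; the structure mirrors efx_phc_settime() above.
 */
int demo_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int demo_adjtime(struct ptp_clock_info *ptp, s64 delta);

static int demo_settime64(struct ptp_clock_info *ptp,
			  const struct timespec64 *ts)
{
	struct timespec64 now, delta;
	int rc;

	/* Read the current hardware clock time */
	rc = demo_gettime64(ptp, &now);
	if (rc)
		return rc;

	/* timespec64 keeps tv_sec 64 bits wide, so this arithmetic
	 * stays correct past year 2038 on 32-bit architectures.
	 */
	delta = timespec64_sub(*ts, now);

	/* Apply the requested time as an offset via the adjtime path */
	return demo_adjtime(ptp, timespec64_to_ns(&delta));
}

Routing settime through adjtime means hardware that only exposes a relative clock-step mechanism can still implement both callbacks, which is the approach efx_phc_settime() documents in its own comment.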
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 6b33127ab352..3449893aea8d 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev) smc->packets_waiting = 0; smc_reset(dev); - init_timer(&smc->media); - smc->media.function = media_check; - smc->media.data = (u_long) dev; - smc->media.expires = jiffies + HZ; - add_timer(&smc->media); + setup_timer(&smc->media, media_check, (u_long)dev); + mod_timer(&smc->media, jiffies + HZ); return 0; } /* smc_open */ diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 88a55f95fe09..14b363a25c02 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -91,6 +91,11 @@ static const char version[] = #include "smc91x.h" +#if defined(CONFIG_ASSABET_NEPONSET) +#include <mach/assabet.h> +#include <mach/neponset.h> +#endif + #ifndef SMC_NOWAIT # define SMC_NOWAIT 0 #endif @@ -2199,27 +2204,17 @@ static int try_toggle_control_gpio(struct device *dev, int value, unsigned int nsdelay) { struct gpio_desc *gpio = *desc; - int res; - - gpio = devm_gpiod_get_index(dev, name, index); - if (IS_ERR(gpio)) { - if (PTR_ERR(gpio) == -ENOENT) { - *desc = NULL; - return 0; - } + enum gpiod_flags flags = value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH; + gpio = devm_gpiod_get_index_optional(dev, name, index, flags); + if (IS_ERR(gpio)) return PTR_ERR(gpio); + + if (gpio) { + if (nsdelay) + usleep_range(nsdelay, 2 * nsdelay); + gpiod_set_value_cansleep(gpio, value); } - res = gpiod_direction_output(gpio, !value); - if (res) { - dev_err(dev, "unable to toggle gpio %s: %i\n", name, res); - devm_gpiod_put(dev, gpio); - gpio = NULL; - return res; - } - if (nsdelay) - usleep_range(nsdelay, 2 * nsdelay); - gpiod_set_value_cansleep(gpio, value); *desc = gpio; return 0; @@ -2243,10 +2238,9 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res; + struct resource *res, *ires; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; - unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2338,25 +2332,23 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; goto out_release_io; } - /* - * If this platform does not specify any special irqflags, or if - * the resource supplies a trigger, override the irqflags with - * the trigger flags from the resource. 
- */ - irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); - if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) - irq_flags = irq_resflags & IRQF_TRIGGER_MASK; + + ndev->irq = ires->start; + + if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) + irq_flags = ires->flags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) goto out_release_io; -#if defined(CONFIG_SA1100_ASSABET) - neponset_ncr_set(NCR_ENET_OSC_EN); +#if defined(CONFIG_ASSABET_NEPONSET) + if (machine_is_assabet() && machine_has_neponset()) + neponset_ncr_set(NCR_ENET_OSC_EN); #endif platform_set_drvdata(pdev, ndev); ret = smc_enable_device(pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index be67baf5f677..3a18501d1068 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -39,14 +39,7 @@ * Define your architecture specific bus configuration parameters here. */ -#if defined(CONFIG_ARCH_LUBBOCK) ||\ - defined(CONFIG_MACH_MAINSTONE) ||\ - defined(CONFIG_MACH_ZYLONITE) ||\ - defined(CONFIG_MACH_LITTLETON) ||\ - defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) ||\ - defined(CONFIG_ARCH_VERSATILE) +#if defined(CONFIG_ARM) #include <asm/mach-types.h> @@ -74,95 +67,8 @@ /* We actually can't write halfwords properly if not word aligned */ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) { - if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { - unsigned int v = val << 16; - v |= readl(ioaddr + (reg & ~2)) & 0xffff; - writel(v, ioaddr + (reg & ~2)); - } else { - writew(val, ioaddr + reg); - } -} - -#elif defined(CONFIG_SA1100_PLEB) -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS (-1) - -#elif defined(CONFIG_SA1100_ASSABET) - -#include <mach/neponset.h> - -/* We can only do 8-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 0 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -/* The first two address lines aren't connected... 
*/ -#define SMC_IO_SHIFT 2 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ - defined(CONFIG_MACH_NOMADIK_8815NHK) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#elif defined(CONFIG_ARCH_INNOKOM) || \ - defined(CONFIG_ARCH_PXA_IDP) || \ - defined(CONFIG_ARCH_RAMSES) || \ - defined(CONFIG_ARCH_PCM027) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -/* We actually can't write halfwords properly if not word aligned */ -static inline void -SMC_outw(u16 val, void __iomem *ioaddr, int reg) -{ - if (reg & 2) { + if ((machine_is_mainstone() || machine_is_stargate2() || + machine_is_pxa_idp()) && reg & 2) { unsigned int v = val << 16; v |= readl(ioaddr + (reg & ~2)) & 0xffff; writel(v, ioaddr + (reg & ~2)); @@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define RPC_LSA_DEFAULT RPC_LED_100_10 #define RPC_LSB_DEFAULT RPC_LED_TX_RX -#elif defined(CONFIG_ARCH_MSM) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH - #elif defined(CONFIG_COLDFIRE) #define SMC_CAN_USE_8BIT 0 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 2965c6ae7d6e..41047c9143d0 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) unsigned long flags; /* Initialise tx packet using broadcast destination address */ - memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + eth_broadcast_addr(pdata->loopback_tx_pkt); /* Use incrementing source address */ for (i = 6; i < 12; i++) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index cd77289c3cfe..623c6ed8764a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -150,7 +150,7 @@ struct stmmac_extra_stats { #define MAC_CSR_H_FRQ_MASK 0x20 #define HASH_TABLE_SIZE 64 -#define PAUSE_TIME 0x200 +#define PAUSE_TIME 0xffff /* Flow Control defines */ #define FLOW_OFF 0 @@ -357,7 +357,8 @@ struct stmmac_dma_ops { void (*dump_regs) (void __iomem *ioaddr); /* Set tx/rx threshold in the 
csr6 register * An invalid value enables the store-and-forward mode */ - void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode); + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, + int rxfifosz); /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, void __iomem *ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index e97074cd5800..5a36bd2c7837 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -91,7 +91,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * STMMAC_RESOURCE_NAME); if (IS_ERR(dwmac->stmmac_rst)) { dev_info(dev, "Could not get reset control!\n"); - return -EINVAL; + if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dwmac->stmmac_rst = NULL; } dwmac->interface = of_get_phy_mode(np); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 64d8f56a9c17..b3fe0575ff6b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -172,6 +172,7 @@ enum inter_frame_gap { /* GMAC FLOW CTRL defines */ #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ @@ -246,6 +247,56 @@ enum ttc_control { #define DMA_CONTROL_FEF 0x00000080 #define DMA_CONTROL_FUF 0x00000040 +/* Receive flow control activation field + * RFA field in DMA control register, bits 23,10:9 + */ +#define DMA_CONTROL_RFA_MASK 0x00800600 + +/* Receive flow control deactivation field + * RFD field in DMA control register, bits 22,12:11 + */ +#define DMA_CONTROL_RFD_MASK 0x00401800 + +/* RFD and RFA fields are encoded as follows + * + * Bit Field + * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) + * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,11 - Reserved + * + * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, + * but packet throughput performance may not be as expected. + * + * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame + * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause + * Description). + * + * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), + * is set to 0. This allows pause frames with a quanta of 0 to be sent + * as an XOFF message to the link peer. 
+ */ + +#define RFA_FULL_MINUS_1K 0x00000000 +#define RFA_FULL_MINUS_2K 0x00000200 +#define RFA_FULL_MINUS_3K 0x00000400 +#define RFA_FULL_MINUS_4K 0x00000600 +#define RFA_FULL_MINUS_5K 0x00800000 +#define RFA_FULL_MINUS_6K 0x00800200 +#define RFA_FULL_MINUS_7K 0x00800400 + +#define RFD_FULL_MINUS_1K 0x00000000 +#define RFD_FULL_MINUS_2K 0x00000800 +#define RFD_FULL_MINUS_3K 0x00001000 +#define RFD_FULL_MINUS_4K 0x00001800 +#define RFD_FULL_MINUS_5K 0x00400000 +#define RFD_FULL_MINUS_6K 0x00400800 +#define RFD_FULL_MINUS_7K 0x00401000 + enum rtc_control { DMA_CONTROL_RTC_64 = 0x00000000, DMA_CONTROL_RTC_32 = 0x00000008, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 0adcf73cf722..371a669d69fd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -201,7 +201,10 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, unsigned int fc, unsigned int pause_time) { void __iomem *ioaddr = hw->pcsr; - unsigned int flow = 0; + /* Set flow such that DZPQ in Mac Register 6 is 0, + * and unicast pause detect is enabled. + */ + unsigned int flow = GMAC_FLOW_CTRL_UP; pr_debug("GMAC Flow-Control:\n"); if (fc & FLOW_RX) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 59d92e811750..0e8937c1184a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -106,8 +106,29 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, return 0; } +static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) +{ + csr6 &= ~DMA_CONTROL_RFA_MASK; + csr6 &= ~DMA_CONTROL_RFD_MASK; + + /* Leave flow control disabled if receive fifo size is less than + * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full, + * and send XON when 2K less than full. + */ + if (rxfifosz < 4096) { + csr6 &= ~DMA_CONTROL_EFC; + pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n", + rxfifosz); + } else { + csr6 |= DMA_CONTROL_EFC; + csr6 |= RFA_FULL_MINUS_1K; + csr6 |= RFD_FULL_MINUS_2K; + } + return csr6; +} + static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode) + int rxmode, int rxfifosz) { u32 csr6 = readl(ioaddr + DMA_CONTROL); @@ -153,6 +174,9 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, csr6 |= DMA_CONTROL_RTC_128; } + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, rxfifosz); + writel(csr6, ioaddr + DMA_CONTROL); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 7d1dce9e7ffc..9d0971c1c2ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -72,7 +72,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, * control register. 
*/ static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode) + int rxmode, int rxfifosz) { u32 csr6 = readl(ioaddr + DMA_CONTROL); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c0a391983372..2ac9552d1fa3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -97,6 +97,7 @@ struct stmmac_priv { int wolopts; int wol_irq; struct clk *stmmac_clk; + struct clk *pclk; struct reset_control *stmmac_rst; int clk_csr; struct timer_list eee_ctrl_timer; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 55e89b3838f1..05c146f718a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); - add_timer(&priv->eee_ctrl_timer); + setup_timer(&priv->eee_ctrl_timer, + stmmac_eee_ctrl_timer, + (unsigned long)priv); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(eee_timer)); priv->hw->mac->set_eee_timer(priv->hw, STMMAC_DEFAULT_LIT_LS, @@ -609,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) * where, freq_div_ratio = clk_ptp_ref_i/50MHz * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; * NOTE: clk_ptp_ref_i should be >= 50MHz to - * achive 20ns accuracy. + * achieve 20ns accuracy. * * 2^x * y == (y << x), hence * 2^32 * 50000000 ==> (50000000 << 32) @@ -1277,8 +1277,10 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { + int rxfifosz = priv->plat->rx_fifo_size; + if (priv->plat->force_thresh_dma_mode) - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { /* * In case of GMAC, SF mode can be enabled @@ -1287,10 +1289,12 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) * 2) There is no bugged Jumbo frame support * that needs to not insert csum in the TDES. 
*/ - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); + priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, + rxfifosz); priv->xstats.threshold = SF_DMA_MODE; } else - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); + priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, + rxfifosz); } /** @@ -1442,6 +1446,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv) static void stmmac_dma_interrupt(struct stmmac_priv *priv) { int status; + int rxfifosz = priv->plat->rx_fifo_size; status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); if (likely((status & handle_rx)) || (status & handle_tx)) { @@ -1456,10 +1461,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) (tc <= 256)) { tc += 64; if (priv->plat->force_thresh_dma_mode) - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, + rxfifosz); else priv->hw->dma->dma_mode(priv->ioaddr, tc, - SF_DMA_MODE); + SF_DMA_MODE, rxfifosz); priv->xstats.threshold = tc; } } else if (unlikely(status == tx_hard_error)) @@ -2849,6 +2855,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, } clk_prepare_enable(priv->stmmac_clk); + priv->pclk = devm_clk_get(priv->device, "pclk"); + if (IS_ERR(priv->pclk)) { + if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto error_pclk_get; + } + priv->pclk = NULL; + } + clk_prepare_enable(priv->pclk); + priv->stmmac_rst = devm_reset_control_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_rst)) { @@ -2934,6 +2950,8 @@ error_mdio_register: error_netdev_register: netif_napi_del(&priv->napi); error_hw_init: + clk_disable_unprepare(priv->pclk); +error_pclk_get: clk_disable_unprepare(priv->stmmac_clk); error_clk_get: free_netdev(ndev); @@ -2958,14 +2976,15 @@ int stmmac_dvr_remove(struct net_device *ndev) priv->hw->dma->stop_tx(priv->ioaddr); stmmac_set_mac(priv->ioaddr, false); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->stmmac_rst) reset_control_assert(priv->stmmac_rst); + clk_disable_unprepare(priv->pclk); clk_disable_unprepare(priv->stmmac_clk); + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); free_netdev(ndev); return 0; @@ -3011,6 +3030,7 @@ int stmmac_suspend(struct net_device *ndev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ + clk_disable(priv->pclk); clk_disable(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); @@ -3051,6 +3071,7 @@ int stmmac_resume(struct net_device *ndev) pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ clk_enable(priv->stmmac_clk); + clk_enable(priv->pclk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..705bbdf93940 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -181,6 +181,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); + of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); + + of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); + 
plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); @@ -272,6 +276,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; + int irq, wol_irq, lpi_irq; + + /* Get IRQ information early so we can ask for a deferred + * probe if needed before we go too far with resource allocation. + */ + irq = platform_get_irq_byname(pdev, "macirq"); + if (irq < 0) { + if (irq != -EPROBE_DEFER) { + dev_err(dev, + "MAC IRQ configuration information not found\n"); + } + return irq; + } + + /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (wol_irq < 0) { + if (wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + wol_irq = irq; + } + + lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -323,39 +358,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(priv); } + /* Copy IRQ values to priv structure which is now available */ + priv->dev->irq = irq; + priv->wol_irq = wol_irq; + priv->lpi_irq = lpi_irq; + /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); - /* Get the MAC information */ - priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); - if (priv->dev->irq < 0) { - if (priv->dev->irq != -EPROBE_DEFER) { - netdev_err(priv->dev, - "MAC IRQ configuration information not found\n"); - } - return priv->dev->irq; - } - - /* - * On some platforms e.g. SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq < 0) { - if (priv->wol_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - priv->wol_irq = priv->dev->irq; - } - - priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (priv->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index c5ee79d8a8c5..170a18b61281 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -105,13 +105,12 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) * Description: this function will read the current time from the * hardware clock and store it in @ts. 
*/ -static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts) +static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; u64 ns; - u32 reminder; spin_lock_irqsave(&priv->ptp_lock, flags); @@ -119,8 +118,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts) spin_unlock_irqrestore(&priv->ptp_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder); - ts->tv_nsec = reminder; + *ts = ns_to_timespec64(ns); return 0; } @@ -135,7 +133,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts) * hardware clock. */ static int stmmac_set_time(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); @@ -168,8 +166,8 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .pps = 0, .adjfreq = stmmac_adjust_freq, .adjtime = stmmac_adjust_time, - .gettime = stmmac_get_time, - .settime = stmmac_set_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, .enable = stmmac_enable, }; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4b51f903fb73..0c5842aeb807 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type) *flow_type = IP_USER_FLOW; break; default: - return 0; + return -EINVAL; } - return 1; + return 0; } static int niu_ethflow_to_class(int flow_type, u64 *class) @@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np, class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; ret = niu_class_to_ethflow(class, &fsp->flow_type); - if (ret < 0) { netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", parent->index); - ret = -EINVAL; goto out; } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fef5dec2cffe..e23a642357e7 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -718,7 +718,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit) cluster_start = curr = (gp->rx_new & ~(4 - 1)); count = 0; kick = -1; - wmb(); + dma_wmb(); while (curr != limit) { curr = NEXT_RX(curr); if (++count == 4) { @@ -1038,7 +1038,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, if (gem_intme(entry)) ctrl |= TXDCTRL_INTME; txd->buffer = cpu_to_le64(mapping); - wmb(); + dma_wmb(); txd->control_word = cpu_to_le64(ctrl); entry = NEXT_TX(entry); } else { @@ -1076,7 +1076,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, txd = &gp->init_block->txd[entry]; txd->buffer = cpu_to_le64(mapping); - wmb(); + dma_wmb(); txd->control_word = cpu_to_le64(this_ctrl | len); if (gem_intme(entry)) @@ -1086,7 +1086,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, } txd = &gp->init_block->txd[first_entry]; txd->buffer = cpu_to_le64(first_mapping); - wmb(); + dma_wmb(); txd->control_word = cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); } @@ -1585,7 +1585,7 @@ static void gem_clean_rings(struct gem *gp) gp->rx_skbs[i] = NULL; } rxd->status_word = 0; - wmb(); + dma_wmb(); rxd->buffer = 0; } @@ -1647,7 +1647,7 @@ static void gem_init_rings(struct gem *gp) RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE); rxd->buffer = cpu_to_le64(dma_addr); - wmb(); + dma_wmb(); rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 
skb_reserve(skb, RX_OFFSET); } @@ -1656,7 +1656,7 @@ static void gem_init_rings(struct gem *gp) struct gem_txd *txd = &gb->txd[i]; txd->control_word = 0; - wmb(); + dma_wmb(); txd->buffer = 0; } wmb(); @@ -2175,7 +2175,7 @@ static int gem_do_start(struct net_device *dev) } /* Mark us as attached again if we come from resume(), this has - * no effect if we weren't detatched and needs to be done now. + * no effect if we weren't detached and needs to be done now. */ netif_device_attach(dev); @@ -2794,7 +2794,7 @@ static void gem_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Ensure reset task is truely gone */ + /* Ensure reset task is truly gone */ cancel_work_sync(&gp->reset_task); /* Free resources */ diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 7a8ca2c7b7df..cf4dcff051d5 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -196,14 +196,14 @@ static u32 sbus_hme_read32(void __iomem *reg) static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)addr; - wmb(); + dma_wmb(); rxd->rx_flags = (__force hme32)flags; } static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)addr; - wmb(); + dma_wmb(); txd->tx_flags = (__force hme32)flags; } @@ -225,14 +225,14 @@ static u32 pci_hme_read32(void __iomem *reg) static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)cpu_to_le32(addr); - wmb(); + dma_wmb(); rxd->rx_flags = (__force hme32)cpu_to_le32(flags); } static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)cpu_to_le32(addr); - wmb(); + dma_wmb(); txd->tx_flags = (__force hme32)cpu_to_le32(flags); } @@ -268,12 +268,12 @@ static u32 pci_hme_read_desc32(hme32 *p) sbus_readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ - wmb(); \ + dma_wmb(); \ (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ - wmb(); \ + dma_wmb(); \ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) @@ -293,12 +293,12 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ - wmb(); \ + dma_wmb(); \ (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ - wmb(); \ + dma_wmb(); \ (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 22e0cad1b4b5..53fe200e0b79 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -519,7 +519,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, if (desc->hdr.state != VIO_DESC_READY) return 1; - rmb(); + dma_rmb(); viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", desc->hdr.state, desc->hdr.ack, @@ -1380,7 +1380,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* This has to be a non-SMP write barrier 
because we are writing * to memory which is shared with the peer LDOM. */ - wmb(); + dma_wmb(); d->hdr.state = VIO_DESC_READY; @@ -1395,7 +1395,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) * is marked READY, but start_cons was false. * If so, vnet_ack() should send out the missed "start" trigger. * - * Note that the wmb() above makes sure the cookies et al. are + * Note that the dma_wmb() above makes sure the cookies et al. are * not globally visible before the VIO_DESC_READY, and that the * stores are ordered correctly by the compiler. The consumer will * not proceed until the VIO_DESC_READY is visible assuring that @@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; dev->stats.tx_carrier_errors++; goto out_dropped; } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index f6a71092e135..631e0afd07d2 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -88,6 +88,7 @@ config TI_CPTS config TI_KEYSTONE_NETCP tristate "TI Keystone NETCP Core Support" select TI_CPSW_ALE + select TI_DAVINCI_MDIO depends on OF depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS ---help--- diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7d8dd0d2182e..b536b4c82752 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -726,7 +726,7 @@ static void cpsw_rx_handler(void *token, int len, int status) if (ndev_status && (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up - * and running, intead of freeing which results + * and running, instead of freeing which results * in reducing of the number of rx descriptor in * DMA engine, requeue skb back to cpdma. 
*/ @@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, slave->port_vlan); + priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev) } return 0; } +#endif -static const struct dev_pm_ops cpsw_pm_ops = { - .suspend = cpsw_suspend, - .resume = cpsw_resume, -}; +static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index fbe42cb107ec..85a55b4ff8c0 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -167,10 +167,9 @@ static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct cpts *cpts = container_of(ptp, struct cpts, info); @@ -178,21 +177,19 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) ns = timecounter_read(&cpts->tc); spin_unlock_irqrestore(&cpts->lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } static int cpts_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { u64 ns; unsigned long flags; struct cpts *cpts = container_of(ptp, struct cpts, info); - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(&cpts->lock, flags); timecounter_init(&cpts->tc, &cpts->cc, ns); @@ -216,20 +213,20 @@ static struct ptp_clock_info cpts_info = { .pps = 0, .adjfreq = cpts_ptp_adjfreq, .adjtime = cpts_ptp_adjtime, - .gettime = cpts_ptp_gettime, - .settime = cpts_ptp_settime, + .gettime64 = cpts_ptp_gettime, + .settime64 = cpts_ptp_settime, .enable = cpts_ptp_enable, }; static void cpts_overflow_check(struct work_struct *work) { - struct timespec ts; + struct timespec64 ts; struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); cpts_ptp_gettime(&cpts->info, &ts); - pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD); } diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98655b44b97e..c00084d689f3 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int davinci_mdio_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); @@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev) return 0; } +#endif static const struct dev_pm_ops 
davinci_mdio_pm_ops = { - .suspend_late = davinci_mdio_suspend, - .resume_early = davinci_mdio_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) }; #if IS_ENABLED(CONFIG_OF) diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 906e9bc412f5..bbacf5cccec2 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -41,7 +41,10 @@ struct netcp_tx_pipe { struct netcp_device *netcp_device; void *dma_queue; unsigned int dma_queue_id; - u8 dma_psflags; + /* To port for packet forwarded to switch. Used only by ethss */ + u8 switch_to_port; +#define SWITCH_TO_PORT_IN_TAGINFO BIT(0) + u8 flags; void *dma_channel; const char *dma_chan_name; }; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a31a8c3c8e7c..43efc3a0cda5 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1098,9 +1098,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, struct netcp_tx_pipe *tx_pipe = NULL; struct netcp_hook_list *tx_hook; struct netcp_packet p_info; - u32 packet_info = 0; unsigned int dma_sz; dma_addr_t dma; + u32 tmp = 0; int ret = 0; p_info.netcp = netcp; @@ -1140,20 +1140,27 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, p_info.psdata_len); set_words(psdata, p_info.psdata_len, psdata); - packet_info |= - (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << + tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } - packet_info |= KNAV_DMA_DESC_HAS_EPIB | + tmp |= KNAV_DMA_DESC_HAS_EPIB | ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << - KNAV_DMA_DESC_RETQ_SHIFT) | - ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) << - KNAV_DMA_DESC_PSFLAG_SHIFT); + KNAV_DMA_DESC_RETQ_SHIFT); - set_words(&packet_info, 1, &desc->packet_info); + if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) { + tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) << + KNAV_DMA_DESC_PSFLAG_SHIFT); + } + + set_words(&tmp, 1, &desc->packet_info); set_words((u32 *)&skb, 1, &desc->pad[0]); + if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { + tmp = tx_pipe->switch_to_port; + set_words((u32 *)&tmp, 1, &desc->tag_info); + } + /* submit packet descriptor */ ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma, &dma_sz); @@ -1320,7 +1327,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, if (addr) ether_addr_copy(naddr->addr, addr); else - memset(naddr->addr, 0, ETH_ALEN); + eth_zero_addr(naddr->addr); list_add_tail(&naddr->node, &netcp->addr_list); return naddr; @@ -2127,7 +2134,7 @@ static int netcp_remove(struct platform_device *pdev) return 0; } -static struct of_device_id of_match[] = { +static const struct of_device_id of_match[] = { { .compatible = "ti,netcp-1.0", }, {}, }; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 84f5ce525750..2bef655279f3 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -40,35 +40,61 @@ #define GBE_MODULE_NAME "netcp-gbe" #define GBE_SS_VERSION_14 0x4ed21104 +#define GBE_SS_REG_INDEX 0 +#define GBE_SGMII34_REG_INDEX 1 +#define GBE_SM_REG_INDEX 2 +/* offset relative to base of GBE_SS_REG_INDEX */ #define GBE13_SGMII_MODULE_OFFSET 0x100 -#define GBE13_SGMII34_MODULE_OFFSET 0x400 -#define GBE13_SWITCH_MODULE_OFFSET 0x800 -#define GBE13_HOST_PORT_OFFSET 0x834 -#define GBE13_SLAVE_PORT_OFFSET 0x860 
-#define GBE13_EMAC_OFFSET 0x900 -#define GBE13_SLAVE_PORT2_OFFSET 0xa00 -#define GBE13_HW_STATS_OFFSET 0xb00 -#define GBE13_ALE_OFFSET 0xe00 +/* offset relative to base of GBE_SM_REG_INDEX */ +#define GBE13_HOST_PORT_OFFSET 0x34 +#define GBE13_SLAVE_PORT_OFFSET 0x60 +#define GBE13_EMAC_OFFSET 0x100 +#define GBE13_SLAVE_PORT2_OFFSET 0x200 +#define GBE13_HW_STATS_OFFSET 0x300 +#define GBE13_ALE_OFFSET 0x600 #define GBE13_HOST_PORT_NUM 0 -#define GBE13_NUM_SLAVES 4 -#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1) #define GBE13_NUM_ALE_ENTRIES 1024 +/* 1G Ethernet NU SS defines */ +#define GBENU_MODULE_NAME "netcp-gbenu" +#define GBE_SS_ID_NU 0x4ee6 +#define GBE_SS_ID_2U 0x4ee8 + +#define IS_SS_ID_MU(d) \ + ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)) + +#define IS_SS_ID_NU(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) + +#define GBENU_SS_REG_INDEX 0 +#define GBENU_SM_REG_INDEX 1 +#define GBENU_SGMII_MODULE_OFFSET 0x100 +#define GBENU_HOST_PORT_OFFSET 0x1000 +#define GBENU_SLAVE_PORT_OFFSET 0x2000 +#define GBENU_EMAC_OFFSET 0x2330 +#define GBENU_HW_STATS_OFFSET 0x1a000 +#define GBENU_ALE_OFFSET 0x1e000 +#define GBENU_HOST_PORT_NUM 0 +#define GBENU_NUM_ALE_ENTRIES 1024 + /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" #define XGBE_SS_VERSION_10 0x4ee42100 -#define XGBE_SERDES_REG_INDEX 1 +#define XGBE_SS_REG_INDEX 0 +#define XGBE_SM_REG_INDEX 1 +#define XGBE_SERDES_REG_INDEX 2 + +/* offset relative to base of XGBE_SS_REG_INDEX */ #define XGBE10_SGMII_MODULE_OFFSET 0x100 -#define XGBE10_SWITCH_MODULE_OFFSET 0x1000 -#define XGBE10_HOST_PORT_OFFSET 0x1034 -#define XGBE10_SLAVE_PORT_OFFSET 0x1064 -#define XGBE10_EMAC_OFFSET 0x1400 -#define XGBE10_ALE_OFFSET 0x1700 -#define XGBE10_HW_STATS_OFFSET 0x1800 +/* offset relative to base of XGBE_SM_REG_INDEX */ +#define XGBE10_HOST_PORT_OFFSET 0x34 +#define XGBE10_SLAVE_PORT_OFFSET 0x64 +#define XGBE10_EMAC_OFFSET 0x400 +#define XGBE10_ALE_OFFSET 0x700 +#define XGBE10_HW_STATS_OFFSET 0x800 #define XGBE10_HOST_PORT_NUM 0 -#define XGBE10_NUM_SLAVES 2 -#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1) #define XGBE10_NUM_ALE_ENTRIES 1024 #define GBE_TIMER_INTERVAL (HZ / 2) @@ -88,7 +114,7 @@ #define MACSL_FULLDUPLEX BIT(0) #define GBE_CTL_P0_ENABLE BIT(2) -#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff +#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf #define GBE_STATS_CD_SEL BIT(28) @@ -108,11 +134,20 @@ #define GBE_STATSC_MODULE 2 #define GBE_STATSD_MODULE 3 +#define GBENU_STATS0_MODULE 0 +#define GBENU_STATS1_MODULE 1 +#define GBENU_STATS2_MODULE 2 +#define GBENU_STATS3_MODULE 3 +#define GBENU_STATS4_MODULE 4 +#define GBENU_STATS5_MODULE 5 +#define GBENU_STATS6_MODULE 6 +#define GBENU_STATS7_MODULE 7 +#define GBENU_STATS8_MODULE 8 + #define XGBE_STATS0_MODULE 0 #define XGBE_STATS1_MODULE 1 #define XGBE_STATS2_MODULE 2 -#define MAX_SLAVES GBE13_NUM_SLAVES /* s: 0-based slave_port */ #define SGMII_BASE(s) \ (((s) < 2) ? 
gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) @@ -125,10 +160,14 @@ #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ offsetof(struct gbe##_##rb, rn) +#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ + offsetof(struct gbenu##_##rb, rn) #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ offsetof(struct xgbe##_##rb, rn) #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn) +#define HOST_TX_PRI_MAP_DEFAULT 0x00000000 + struct xgbe_ss_regs { u32 id_ver; u32 synce_count; @@ -258,6 +297,192 @@ struct xgbe_hw_stats { #define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) +struct gbenu_ss_regs { + u32 id_ver; + u32 synce_count; /* NU */ + u32 synce_mux; /* NU */ + u32 control; /* 2U */ + u32 __rsvd_0[2]; /* 2U */ + u32 rgmii_status; /* 2U */ + u32 ss_status; /* 2U */ +}; + +struct gbenu_switch_regs { + u32 id_ver; + u32 control; + u32 __rsvd_0[2]; + u32 emcontrol; + u32 stat_port_en; + u32 ptype; /* NU */ + u32 soft_idle; + u32 thru_rate; /* NU */ + u32 gap_thresh; /* NU */ + u32 tx_start_wds; /* NU */ + u32 eee_prescale; /* 2U */ + u32 tx_g_oflow_thresh_set; /* NU */ + u32 tx_g_oflow_thresh_clr; /* NU */ + u32 tx_g_buf_thresh_set_l; /* NU */ + u32 tx_g_buf_thresh_set_h; /* NU */ + u32 tx_g_buf_thresh_clr_l; /* NU */ + u32 tx_g_buf_thresh_clr_h; /* NU */ +}; + +struct gbenu_port_regs { + u32 __rsvd_0; + u32 control; + u32 max_blks; /* 2U */ + u32 mem_align1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; /* 2U */ + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_1; + u32 idle2lpi; /* 2U */ + u32 lpi2idle; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_2; + u32 __rsvd_3[176]; /* NU: more to add */ + u32 __rsvd_4[2]; + u32 sa_lo; + u32 sa_hi; + u32 ts_ctl; + u32 ts_seq_ltype; + u32 ts_vlan; + u32 ts_ctl_ltype2; + u32 ts_ctl2; +}; + +struct gbenu_host_port_regs { + u32 __rsvd_0; + u32 control; + u32 flow_id_offset; /* 2U */ + u32 __rsvd_1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_2; + u32 idle2lpi; /* 2U */ + u32 lpi2wake; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_3; + u32 __rsvd_4[184]; /* NU */ + u32 host_blks_pri; /* NU */ +}; + +struct gbenu_emac_regs { + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 boff_test; + u32 rx_pause; + u32 __rsvd_0[11]; /* NU */ + u32 tx_pause; + u32 __rsvd_1[11]; /* NU */ + u32 em_control; + u32 tx_gap; +}; + +/* Some hw stat regs are applicable to slave port only. + * This is handled by gbenu_et_stats struct. Also some + * are for SS version NU and some are for 2U. 
+ */ +struct gbenu_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; /* slave */ + u32 rx_crc_errors; + u32 rx_align_code_errors; /* slave */ + u32 rx_oversized_frames; + u32 rx_jabber_frames; /* slave */ + u32 rx_undersized_frames; + u32 rx_fragments; /* slave */ + u32 ale_drop; + u32 ale_overrun_drop; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; /* slave */ + u32 tx_deferred_frames; /* slave */ + u32 tx_collision_frames; /* slave */ + u32 tx_single_coll_frames; /* slave */ + u32 tx_mult_coll_frames; /* slave */ + u32 tx_excessive_collisions; /* slave */ + u32 tx_late_collisions; /* slave */ + u32 rx_ipg_error; /* slave 10G only */ + u32 tx_carrier_sense_errors; /* slave */ + u32 tx_bytes; + u32 tx_64B_frames; + u32 tx_65_to_127B_frames; + u32 tx_128_to_255B_frames; + u32 tx_256_to_511B_frames; + u32 tx_512_to_1023B_frames; + u32 tx_1024B_frames; + u32 net_bytes; + u32 rx_bottom_fifo_drop; + u32 rx_port_mask_drop; + u32 rx_top_fifo_drop; + u32 ale_rate_limit_drop; + u32 ale_vid_ingress_drop; + u32 ale_da_eq_sa_drop; + u32 __rsvd_0[3]; + u32 ale_unknown_ucast; + u32 ale_unknown_ucast_bytes; + u32 ale_unknown_mcast; + u32 ale_unknown_mcast_bytes; + u32 ale_unknown_bcast; + u32 ale_unknown_bcast_bytes; + u32 ale_pol_match; + u32 ale_pol_match_red; /* NU */ + u32 ale_pol_match_yellow; /* NU */ + u32 __rsvd_1[44]; + u32 tx_mem_protect_err; + /* following NU only */ + u32 tx_pri0; + u32 tx_pri1; + u32 tx_pri2; + u32 tx_pri3; + u32 tx_pri4; + u32 tx_pri5; + u32 tx_pri6; + u32 tx_pri7; + u32 tx_pri0_bcnt; + u32 tx_pri1_bcnt; + u32 tx_pri2_bcnt; + u32 tx_pri3_bcnt; + u32 tx_pri4_bcnt; + u32 tx_pri5_bcnt; + u32 tx_pri6_bcnt; + u32 tx_pri7_bcnt; + u32 tx_pri0_drop; + u32 tx_pri1_drop; + u32 tx_pri2_drop; + u32 tx_pri3_drop; + u32 tx_pri4_drop; + u32 tx_pri5_drop; + u32 tx_pri6_drop; + u32 tx_pri7_drop; + u32 tx_pri0_drop_bcnt; + u32 tx_pri1_drop_bcnt; + u32 tx_pri2_drop_bcnt; + u32 tx_pri3_drop_bcnt; + u32 tx_pri4_drop_bcnt; + u32 tx_pri5_drop_bcnt; + u32 tx_pri6_drop_bcnt; + u32 tx_pri7_drop_bcnt; +}; + +#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) +#define GBENU_HW_STATS_REG_MAP_SZ 0x200 + struct gbe_ss_regs { u32 id_ver; u32 synce_count; @@ -316,6 +541,7 @@ struct gbe_port_regs_ofs { u16 ts_vlan; u16 ts_ctl_ltype2; u16 ts_ctl2; + u16 rx_maxlen; /* 2U, NU */ }; struct gbe_host_port_regs { @@ -390,9 +616,7 @@ struct gbe_hw_stats { }; #define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) -#define GBE13_NUM_HW_STATS_MOD 2 -#define XGBE10_NUM_HW_STATS_MOD 3 -#define GBE_MAX_HW_STAT_MODS 3 +#define GBE_MAX_HW_STAT_MODS 9 #define GBE_HW_STATS_REG_MAP_SZ 0x100 struct gbe_slave { @@ -420,11 +644,14 @@ struct gbe_priv { u32 ale_entries; u32 ale_ports; bool enable_ale; + u8 max_num_slaves; + u8 max_num_ports; /* max_num_slaves + 1 */ struct netcp_tx_pipe tx_pipe; int host_port; u32 rx_packet_max; u32 ss_version; + u32 stats_en_mask; void __iomem *ss_regs; void __iomem *switch_regs; @@ -475,275 +702,778 @@ struct netcp_ethtool_stat { int offset; }; -#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSA_INFO(field) \ +{ \ + "GBE_A:"#field, GBE_STATSA_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSB_INFO(field) "GBE_B:"#field, 
GBE_STATSB_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSB_INFO(field) \ +{ \ + "GBE_B:"#field, GBE_STATSB_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSC_INFO(field) \ +{ \ + "GBE_C:"#field, GBE_STATSC_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSD_INFO(field) \ +{ \ + "GBE_D:"#field, GBE_STATSD_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} static const struct netcp_ethtool_stat gbe13_et_stats[] = { /* GBE module A */ - {GBE_STATSA_INFO(rx_good_frames)}, - {GBE_STATSA_INFO(rx_broadcast_frames)}, - {GBE_STATSA_INFO(rx_multicast_frames)}, - {GBE_STATSA_INFO(rx_pause_frames)}, - {GBE_STATSA_INFO(rx_crc_errors)}, - {GBE_STATSA_INFO(rx_align_code_errors)}, - {GBE_STATSA_INFO(rx_oversized_frames)}, - {GBE_STATSA_INFO(rx_jabber_frames)}, - {GBE_STATSA_INFO(rx_undersized_frames)}, - {GBE_STATSA_INFO(rx_fragments)}, - {GBE_STATSA_INFO(rx_bytes)}, - {GBE_STATSA_INFO(tx_good_frames)}, - {GBE_STATSA_INFO(tx_broadcast_frames)}, - {GBE_STATSA_INFO(tx_multicast_frames)}, - {GBE_STATSA_INFO(tx_pause_frames)}, - {GBE_STATSA_INFO(tx_deferred_frames)}, - {GBE_STATSA_INFO(tx_collision_frames)}, - {GBE_STATSA_INFO(tx_single_coll_frames)}, - {GBE_STATSA_INFO(tx_mult_coll_frames)}, - {GBE_STATSA_INFO(tx_excessive_collisions)}, - {GBE_STATSA_INFO(tx_late_collisions)}, - {GBE_STATSA_INFO(tx_underrun)}, - {GBE_STATSA_INFO(tx_carrier_sense_errors)}, - {GBE_STATSA_INFO(tx_bytes)}, - {GBE_STATSA_INFO(tx_64byte_frames)}, - {GBE_STATSA_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSA_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSA_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSA_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSA_INFO(tx_1024byte_frames)}, - {GBE_STATSA_INFO(net_bytes)}, - {GBE_STATSA_INFO(rx_sof_overruns)}, - {GBE_STATSA_INFO(rx_mof_overruns)}, - {GBE_STATSA_INFO(rx_dma_overruns)}, + GBE_STATSA_INFO(rx_good_frames), + GBE_STATSA_INFO(rx_broadcast_frames), + GBE_STATSA_INFO(rx_multicast_frames), + GBE_STATSA_INFO(rx_pause_frames), + GBE_STATSA_INFO(rx_crc_errors), + GBE_STATSA_INFO(rx_align_code_errors), + GBE_STATSA_INFO(rx_oversized_frames), + GBE_STATSA_INFO(rx_jabber_frames), + GBE_STATSA_INFO(rx_undersized_frames), + GBE_STATSA_INFO(rx_fragments), + GBE_STATSA_INFO(rx_bytes), + GBE_STATSA_INFO(tx_good_frames), + GBE_STATSA_INFO(tx_broadcast_frames), + GBE_STATSA_INFO(tx_multicast_frames), + GBE_STATSA_INFO(tx_pause_frames), + GBE_STATSA_INFO(tx_deferred_frames), + GBE_STATSA_INFO(tx_collision_frames), + GBE_STATSA_INFO(tx_single_coll_frames), + GBE_STATSA_INFO(tx_mult_coll_frames), + GBE_STATSA_INFO(tx_excessive_collisions), + GBE_STATSA_INFO(tx_late_collisions), + GBE_STATSA_INFO(tx_underrun), + GBE_STATSA_INFO(tx_carrier_sense_errors), + GBE_STATSA_INFO(tx_bytes), + GBE_STATSA_INFO(tx_64byte_frames), + GBE_STATSA_INFO(tx_65_to_127byte_frames), + GBE_STATSA_INFO(tx_128_to_255byte_frames), + GBE_STATSA_INFO(tx_256_to_511byte_frames), + GBE_STATSA_INFO(tx_512_to_1023byte_frames), + GBE_STATSA_INFO(tx_1024byte_frames), + GBE_STATSA_INFO(net_bytes), 
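+/* For reference while reading this long conversion: each GBE_STATSx_INFO()
+ * macro now expands to a complete brace-enclosed initializer, so an entry
+ * such as GBE_STATSA_INFO(rx_good_frames) becomes, roughly:
+ *
+ *   { "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
+ *     FIELD_SIZEOF(struct gbe_hw_stats, rx_good_frames),
+ *     offsetof(struct gbe_hw_stats, rx_good_frames) },
+ *
+ * which is why the explicit { } around every use site is being dropped.
+ */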
+ GBE_STATSA_INFO(rx_sof_overruns), + GBE_STATSA_INFO(rx_mof_overruns), + GBE_STATSA_INFO(rx_dma_overruns), /* GBE module B */ - {GBE_STATSB_INFO(rx_good_frames)}, - {GBE_STATSB_INFO(rx_broadcast_frames)}, - {GBE_STATSB_INFO(rx_multicast_frames)}, - {GBE_STATSB_INFO(rx_pause_frames)}, - {GBE_STATSB_INFO(rx_crc_errors)}, - {GBE_STATSB_INFO(rx_align_code_errors)}, - {GBE_STATSB_INFO(rx_oversized_frames)}, - {GBE_STATSB_INFO(rx_jabber_frames)}, - {GBE_STATSB_INFO(rx_undersized_frames)}, - {GBE_STATSB_INFO(rx_fragments)}, - {GBE_STATSB_INFO(rx_bytes)}, - {GBE_STATSB_INFO(tx_good_frames)}, - {GBE_STATSB_INFO(tx_broadcast_frames)}, - {GBE_STATSB_INFO(tx_multicast_frames)}, - {GBE_STATSB_INFO(tx_pause_frames)}, - {GBE_STATSB_INFO(tx_deferred_frames)}, - {GBE_STATSB_INFO(tx_collision_frames)}, - {GBE_STATSB_INFO(tx_single_coll_frames)}, - {GBE_STATSB_INFO(tx_mult_coll_frames)}, - {GBE_STATSB_INFO(tx_excessive_collisions)}, - {GBE_STATSB_INFO(tx_late_collisions)}, - {GBE_STATSB_INFO(tx_underrun)}, - {GBE_STATSB_INFO(tx_carrier_sense_errors)}, - {GBE_STATSB_INFO(tx_bytes)}, - {GBE_STATSB_INFO(tx_64byte_frames)}, - {GBE_STATSB_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSB_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSB_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSB_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSB_INFO(tx_1024byte_frames)}, - {GBE_STATSB_INFO(net_bytes)}, - {GBE_STATSB_INFO(rx_sof_overruns)}, - {GBE_STATSB_INFO(rx_mof_overruns)}, - {GBE_STATSB_INFO(rx_dma_overruns)}, + GBE_STATSB_INFO(rx_good_frames), + GBE_STATSB_INFO(rx_broadcast_frames), + GBE_STATSB_INFO(rx_multicast_frames), + GBE_STATSB_INFO(rx_pause_frames), + GBE_STATSB_INFO(rx_crc_errors), + GBE_STATSB_INFO(rx_align_code_errors), + GBE_STATSB_INFO(rx_oversized_frames), + GBE_STATSB_INFO(rx_jabber_frames), + GBE_STATSB_INFO(rx_undersized_frames), + GBE_STATSB_INFO(rx_fragments), + GBE_STATSB_INFO(rx_bytes), + GBE_STATSB_INFO(tx_good_frames), + GBE_STATSB_INFO(tx_broadcast_frames), + GBE_STATSB_INFO(tx_multicast_frames), + GBE_STATSB_INFO(tx_pause_frames), + GBE_STATSB_INFO(tx_deferred_frames), + GBE_STATSB_INFO(tx_collision_frames), + GBE_STATSB_INFO(tx_single_coll_frames), + GBE_STATSB_INFO(tx_mult_coll_frames), + GBE_STATSB_INFO(tx_excessive_collisions), + GBE_STATSB_INFO(tx_late_collisions), + GBE_STATSB_INFO(tx_underrun), + GBE_STATSB_INFO(tx_carrier_sense_errors), + GBE_STATSB_INFO(tx_bytes), + GBE_STATSB_INFO(tx_64byte_frames), + GBE_STATSB_INFO(tx_65_to_127byte_frames), + GBE_STATSB_INFO(tx_128_to_255byte_frames), + GBE_STATSB_INFO(tx_256_to_511byte_frames), + GBE_STATSB_INFO(tx_512_to_1023byte_frames), + GBE_STATSB_INFO(tx_1024byte_frames), + GBE_STATSB_INFO(net_bytes), + GBE_STATSB_INFO(rx_sof_overruns), + GBE_STATSB_INFO(rx_mof_overruns), + GBE_STATSB_INFO(rx_dma_overruns), /* GBE module C */ - {GBE_STATSC_INFO(rx_good_frames)}, - {GBE_STATSC_INFO(rx_broadcast_frames)}, - {GBE_STATSC_INFO(rx_multicast_frames)}, - {GBE_STATSC_INFO(rx_pause_frames)}, - {GBE_STATSC_INFO(rx_crc_errors)}, - {GBE_STATSC_INFO(rx_align_code_errors)}, - {GBE_STATSC_INFO(rx_oversized_frames)}, - {GBE_STATSC_INFO(rx_jabber_frames)}, - {GBE_STATSC_INFO(rx_undersized_frames)}, - {GBE_STATSC_INFO(rx_fragments)}, - {GBE_STATSC_INFO(rx_bytes)}, - {GBE_STATSC_INFO(tx_good_frames)}, - {GBE_STATSC_INFO(tx_broadcast_frames)}, - {GBE_STATSC_INFO(tx_multicast_frames)}, - {GBE_STATSC_INFO(tx_pause_frames)}, - {GBE_STATSC_INFO(tx_deferred_frames)}, - {GBE_STATSC_INFO(tx_collision_frames)}, - {GBE_STATSC_INFO(tx_single_coll_frames)}, - 
{GBE_STATSC_INFO(tx_mult_coll_frames)}, - {GBE_STATSC_INFO(tx_excessive_collisions)}, - {GBE_STATSC_INFO(tx_late_collisions)}, - {GBE_STATSC_INFO(tx_underrun)}, - {GBE_STATSC_INFO(tx_carrier_sense_errors)}, - {GBE_STATSC_INFO(tx_bytes)}, - {GBE_STATSC_INFO(tx_64byte_frames)}, - {GBE_STATSC_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSC_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSC_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSC_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSC_INFO(tx_1024byte_frames)}, - {GBE_STATSC_INFO(net_bytes)}, - {GBE_STATSC_INFO(rx_sof_overruns)}, - {GBE_STATSC_INFO(rx_mof_overruns)}, - {GBE_STATSC_INFO(rx_dma_overruns)}, + GBE_STATSC_INFO(rx_good_frames), + GBE_STATSC_INFO(rx_broadcast_frames), + GBE_STATSC_INFO(rx_multicast_frames), + GBE_STATSC_INFO(rx_pause_frames), + GBE_STATSC_INFO(rx_crc_errors), + GBE_STATSC_INFO(rx_align_code_errors), + GBE_STATSC_INFO(rx_oversized_frames), + GBE_STATSC_INFO(rx_jabber_frames), + GBE_STATSC_INFO(rx_undersized_frames), + GBE_STATSC_INFO(rx_fragments), + GBE_STATSC_INFO(rx_bytes), + GBE_STATSC_INFO(tx_good_frames), + GBE_STATSC_INFO(tx_broadcast_frames), + GBE_STATSC_INFO(tx_multicast_frames), + GBE_STATSC_INFO(tx_pause_frames), + GBE_STATSC_INFO(tx_deferred_frames), + GBE_STATSC_INFO(tx_collision_frames), + GBE_STATSC_INFO(tx_single_coll_frames), + GBE_STATSC_INFO(tx_mult_coll_frames), + GBE_STATSC_INFO(tx_excessive_collisions), + GBE_STATSC_INFO(tx_late_collisions), + GBE_STATSC_INFO(tx_underrun), + GBE_STATSC_INFO(tx_carrier_sense_errors), + GBE_STATSC_INFO(tx_bytes), + GBE_STATSC_INFO(tx_64byte_frames), + GBE_STATSC_INFO(tx_65_to_127byte_frames), + GBE_STATSC_INFO(tx_128_to_255byte_frames), + GBE_STATSC_INFO(tx_256_to_511byte_frames), + GBE_STATSC_INFO(tx_512_to_1023byte_frames), + GBE_STATSC_INFO(tx_1024byte_frames), + GBE_STATSC_INFO(net_bytes), + GBE_STATSC_INFO(rx_sof_overruns), + GBE_STATSC_INFO(rx_mof_overruns), + GBE_STATSC_INFO(rx_dma_overruns), /* GBE module D */ - {GBE_STATSD_INFO(rx_good_frames)}, - {GBE_STATSD_INFO(rx_broadcast_frames)}, - {GBE_STATSD_INFO(rx_multicast_frames)}, - {GBE_STATSD_INFO(rx_pause_frames)}, - {GBE_STATSD_INFO(rx_crc_errors)}, - {GBE_STATSD_INFO(rx_align_code_errors)}, - {GBE_STATSD_INFO(rx_oversized_frames)}, - {GBE_STATSD_INFO(rx_jabber_frames)}, - {GBE_STATSD_INFO(rx_undersized_frames)}, - {GBE_STATSD_INFO(rx_fragments)}, - {GBE_STATSD_INFO(rx_bytes)}, - {GBE_STATSD_INFO(tx_good_frames)}, - {GBE_STATSD_INFO(tx_broadcast_frames)}, - {GBE_STATSD_INFO(tx_multicast_frames)}, - {GBE_STATSD_INFO(tx_pause_frames)}, - {GBE_STATSD_INFO(tx_deferred_frames)}, - {GBE_STATSD_INFO(tx_collision_frames)}, - {GBE_STATSD_INFO(tx_single_coll_frames)}, - {GBE_STATSD_INFO(tx_mult_coll_frames)}, - {GBE_STATSD_INFO(tx_excessive_collisions)}, - {GBE_STATSD_INFO(tx_late_collisions)}, - {GBE_STATSD_INFO(tx_underrun)}, - {GBE_STATSD_INFO(tx_carrier_sense_errors)}, - {GBE_STATSD_INFO(tx_bytes)}, - {GBE_STATSD_INFO(tx_64byte_frames)}, - {GBE_STATSD_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSD_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSD_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSD_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSD_INFO(tx_1024byte_frames)}, - {GBE_STATSD_INFO(net_bytes)}, - {GBE_STATSD_INFO(rx_sof_overruns)}, - {GBE_STATSD_INFO(rx_mof_overruns)}, - {GBE_STATSD_INFO(rx_dma_overruns)}, + GBE_STATSD_INFO(rx_good_frames), + GBE_STATSD_INFO(rx_broadcast_frames), + GBE_STATSD_INFO(rx_multicast_frames), + GBE_STATSD_INFO(rx_pause_frames), + GBE_STATSD_INFO(rx_crc_errors), + 
GBE_STATSD_INFO(rx_align_code_errors), + GBE_STATSD_INFO(rx_oversized_frames), + GBE_STATSD_INFO(rx_jabber_frames), + GBE_STATSD_INFO(rx_undersized_frames), + GBE_STATSD_INFO(rx_fragments), + GBE_STATSD_INFO(rx_bytes), + GBE_STATSD_INFO(tx_good_frames), + GBE_STATSD_INFO(tx_broadcast_frames), + GBE_STATSD_INFO(tx_multicast_frames), + GBE_STATSD_INFO(tx_pause_frames), + GBE_STATSD_INFO(tx_deferred_frames), + GBE_STATSD_INFO(tx_collision_frames), + GBE_STATSD_INFO(tx_single_coll_frames), + GBE_STATSD_INFO(tx_mult_coll_frames), + GBE_STATSD_INFO(tx_excessive_collisions), + GBE_STATSD_INFO(tx_late_collisions), + GBE_STATSD_INFO(tx_underrun), + GBE_STATSD_INFO(tx_carrier_sense_errors), + GBE_STATSD_INFO(tx_bytes), + GBE_STATSD_INFO(tx_64byte_frames), + GBE_STATSD_INFO(tx_65_to_127byte_frames), + GBE_STATSD_INFO(tx_128_to_255byte_frames), + GBE_STATSD_INFO(tx_256_to_511byte_frames), + GBE_STATSD_INFO(tx_512_to_1023byte_frames), + GBE_STATSD_INFO(tx_1024byte_frames), + GBE_STATSD_INFO(net_bytes), + GBE_STATSD_INFO(rx_sof_overruns), + GBE_STATSD_INFO(rx_mof_overruns), + GBE_STATSD_INFO(rx_dma_overruns), }; -#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +/* This is the size of entries in GBENU_STATS_HOST */ +#define GBENU_ET_STATS_HOST_SIZE 33 + +#define GBENU_STATS_HOST(field) \ +{ \ + "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} -#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +/* This is the size of entries in each of GBENU_STATS_P1..P8 */ +#define GBENU_ET_STATS_PORT_SIZE 46 -#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +#define GBENU_STATS_P1(field) \ +{ \ + "GBE_P1:"#field, GBENU_STATS1_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P2(field) \ +{ \ + "GBE_P2:"#field, GBENU_STATS2_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P3(field) \ +{ \ + "GBE_P3:"#field, GBENU_STATS3_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P4(field) \ +{ \ + "GBE_P4:"#field, GBENU_STATS4_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P5(field) \ +{ \ + "GBE_P5:"#field, GBENU_STATS5_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P6(field) \ +{ \ + "GBE_P6:"#field, GBENU_STATS6_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P7(field) \ +{ \ + "GBE_P7:"#field, GBENU_STATS7_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P8(field) \ +{ \ + "GBE_P8:"#field, GBENU_STATS8_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +static const struct netcp_ethtool_stat gbenu_et_stats[] = { + /* GBENU Host Module */ + GBENU_STATS_HOST(rx_good_frames), + GBENU_STATS_HOST(rx_broadcast_frames), +
GBENU_STATS_HOST(rx_multicast_frames), + GBENU_STATS_HOST(rx_crc_errors), + GBENU_STATS_HOST(rx_oversized_frames), + GBENU_STATS_HOST(rx_undersized_frames), + GBENU_STATS_HOST(ale_drop), + GBENU_STATS_HOST(ale_overrun_drop), + GBENU_STATS_HOST(rx_bytes), + GBENU_STATS_HOST(tx_good_frames), + GBENU_STATS_HOST(tx_broadcast_frames), + GBENU_STATS_HOST(tx_multicast_frames), + GBENU_STATS_HOST(tx_bytes), + GBENU_STATS_HOST(tx_64B_frames), + GBENU_STATS_HOST(tx_65_to_127B_frames), + GBENU_STATS_HOST(tx_128_to_255B_frames), + GBENU_STATS_HOST(tx_256_to_511B_frames), + GBENU_STATS_HOST(tx_512_to_1023B_frames), + GBENU_STATS_HOST(tx_1024B_frames), + GBENU_STATS_HOST(net_bytes), + GBENU_STATS_HOST(rx_bottom_fifo_drop), + GBENU_STATS_HOST(rx_port_mask_drop), + GBENU_STATS_HOST(rx_top_fifo_drop), + GBENU_STATS_HOST(ale_rate_limit_drop), + GBENU_STATS_HOST(ale_vid_ingress_drop), + GBENU_STATS_HOST(ale_da_eq_sa_drop), + GBENU_STATS_HOST(ale_unknown_ucast), + GBENU_STATS_HOST(ale_unknown_ucast_bytes), + GBENU_STATS_HOST(ale_unknown_mcast), + GBENU_STATS_HOST(ale_unknown_mcast_bytes), + GBENU_STATS_HOST(ale_unknown_bcast), + GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(tx_mem_protect_err), + /* GBENU Module 1 */ + GBENU_STATS_P1(rx_good_frames), + GBENU_STATS_P1(rx_broadcast_frames), + GBENU_STATS_P1(rx_multicast_frames), + GBENU_STATS_P1(rx_pause_frames), + GBENU_STATS_P1(rx_crc_errors), + GBENU_STATS_P1(rx_align_code_errors), + GBENU_STATS_P1(rx_oversized_frames), + GBENU_STATS_P1(rx_jabber_frames), + GBENU_STATS_P1(rx_undersized_frames), + GBENU_STATS_P1(rx_fragments), + GBENU_STATS_P1(ale_drop), + GBENU_STATS_P1(ale_overrun_drop), + GBENU_STATS_P1(rx_bytes), + GBENU_STATS_P1(tx_good_frames), + GBENU_STATS_P1(tx_broadcast_frames), + GBENU_STATS_P1(tx_multicast_frames), + GBENU_STATS_P1(tx_pause_frames), + GBENU_STATS_P1(tx_deferred_frames), + GBENU_STATS_P1(tx_collision_frames), + GBENU_STATS_P1(tx_single_coll_frames), + GBENU_STATS_P1(tx_mult_coll_frames), + GBENU_STATS_P1(tx_excessive_collisions), + GBENU_STATS_P1(tx_late_collisions), + GBENU_STATS_P1(rx_ipg_error), + GBENU_STATS_P1(tx_carrier_sense_errors), + GBENU_STATS_P1(tx_bytes), + GBENU_STATS_P1(tx_64B_frames), + GBENU_STATS_P1(tx_65_to_127B_frames), + GBENU_STATS_P1(tx_128_to_255B_frames), + GBENU_STATS_P1(tx_256_to_511B_frames), + GBENU_STATS_P1(tx_512_to_1023B_frames), + GBENU_STATS_P1(tx_1024B_frames), + GBENU_STATS_P1(net_bytes), + GBENU_STATS_P1(rx_bottom_fifo_drop), + GBENU_STATS_P1(rx_port_mask_drop), + GBENU_STATS_P1(rx_top_fifo_drop), + GBENU_STATS_P1(ale_rate_limit_drop), + GBENU_STATS_P1(ale_vid_ingress_drop), + GBENU_STATS_P1(ale_da_eq_sa_drop), + GBENU_STATS_P1(ale_unknown_ucast), + GBENU_STATS_P1(ale_unknown_ucast_bytes), + GBENU_STATS_P1(ale_unknown_mcast), + GBENU_STATS_P1(ale_unknown_mcast_bytes), + GBENU_STATS_P1(ale_unknown_bcast), + GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(tx_mem_protect_err), + /* GBENU Module 2 */ + GBENU_STATS_P2(rx_good_frames), + GBENU_STATS_P2(rx_broadcast_frames), + GBENU_STATS_P2(rx_multicast_frames), + GBENU_STATS_P2(rx_pause_frames), + GBENU_STATS_P2(rx_crc_errors), + GBENU_STATS_P2(rx_align_code_errors), + GBENU_STATS_P2(rx_oversized_frames), + GBENU_STATS_P2(rx_jabber_frames), + GBENU_STATS_P2(rx_undersized_frames), + GBENU_STATS_P2(rx_fragments), + GBENU_STATS_P2(ale_drop), + GBENU_STATS_P2(ale_overrun_drop), + GBENU_STATS_P2(rx_bytes), + GBENU_STATS_P2(tx_good_frames), + GBENU_STATS_P2(tx_broadcast_frames), + GBENU_STATS_P2(tx_multicast_frames), + 
GBENU_STATS_P2(tx_pause_frames), + GBENU_STATS_P2(tx_deferred_frames), + GBENU_STATS_P2(tx_collision_frames), + GBENU_STATS_P2(tx_single_coll_frames), + GBENU_STATS_P2(tx_mult_coll_frames), + GBENU_STATS_P2(tx_excessive_collisions), + GBENU_STATS_P2(tx_late_collisions), + GBENU_STATS_P2(rx_ipg_error), + GBENU_STATS_P2(tx_carrier_sense_errors), + GBENU_STATS_P2(tx_bytes), + GBENU_STATS_P2(tx_64B_frames), + GBENU_STATS_P2(tx_65_to_127B_frames), + GBENU_STATS_P2(tx_128_to_255B_frames), + GBENU_STATS_P2(tx_256_to_511B_frames), + GBENU_STATS_P2(tx_512_to_1023B_frames), + GBENU_STATS_P2(tx_1024B_frames), + GBENU_STATS_P2(net_bytes), + GBENU_STATS_P2(rx_bottom_fifo_drop), + GBENU_STATS_P2(rx_port_mask_drop), + GBENU_STATS_P2(rx_top_fifo_drop), + GBENU_STATS_P2(ale_rate_limit_drop), + GBENU_STATS_P2(ale_vid_ingress_drop), + GBENU_STATS_P2(ale_da_eq_sa_drop), + GBENU_STATS_P2(ale_unknown_ucast), + GBENU_STATS_P2(ale_unknown_ucast_bytes), + GBENU_STATS_P2(ale_unknown_mcast), + GBENU_STATS_P2(ale_unknown_mcast_bytes), + GBENU_STATS_P2(ale_unknown_bcast), + GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(tx_mem_protect_err), + /* GBENU Module 3 */ + GBENU_STATS_P3(rx_good_frames), + GBENU_STATS_P3(rx_broadcast_frames), + GBENU_STATS_P3(rx_multicast_frames), + GBENU_STATS_P3(rx_pause_frames), + GBENU_STATS_P3(rx_crc_errors), + GBENU_STATS_P3(rx_align_code_errors), + GBENU_STATS_P3(rx_oversized_frames), + GBENU_STATS_P3(rx_jabber_frames), + GBENU_STATS_P3(rx_undersized_frames), + GBENU_STATS_P3(rx_fragments), + GBENU_STATS_P3(ale_drop), + GBENU_STATS_P3(ale_overrun_drop), + GBENU_STATS_P3(rx_bytes), + GBENU_STATS_P3(tx_good_frames), + GBENU_STATS_P3(tx_broadcast_frames), + GBENU_STATS_P3(tx_multicast_frames), + GBENU_STATS_P3(tx_pause_frames), + GBENU_STATS_P3(tx_deferred_frames), + GBENU_STATS_P3(tx_collision_frames), + GBENU_STATS_P3(tx_single_coll_frames), + GBENU_STATS_P3(tx_mult_coll_frames), + GBENU_STATS_P3(tx_excessive_collisions), + GBENU_STATS_P3(tx_late_collisions), + GBENU_STATS_P3(rx_ipg_error), + GBENU_STATS_P3(tx_carrier_sense_errors), + GBENU_STATS_P3(tx_bytes), + GBENU_STATS_P3(tx_64B_frames), + GBENU_STATS_P3(tx_65_to_127B_frames), + GBENU_STATS_P3(tx_128_to_255B_frames), + GBENU_STATS_P3(tx_256_to_511B_frames), + GBENU_STATS_P3(tx_512_to_1023B_frames), + GBENU_STATS_P3(tx_1024B_frames), + GBENU_STATS_P3(net_bytes), + GBENU_STATS_P3(rx_bottom_fifo_drop), + GBENU_STATS_P3(rx_port_mask_drop), + GBENU_STATS_P3(rx_top_fifo_drop), + GBENU_STATS_P3(ale_rate_limit_drop), + GBENU_STATS_P3(ale_vid_ingress_drop), + GBENU_STATS_P3(ale_da_eq_sa_drop), + GBENU_STATS_P3(ale_unknown_ucast), + GBENU_STATS_P3(ale_unknown_ucast_bytes), + GBENU_STATS_P3(ale_unknown_mcast), + GBENU_STATS_P3(ale_unknown_mcast_bytes), + GBENU_STATS_P3(ale_unknown_bcast), + GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(tx_mem_protect_err), + /* GBENU Module 4 */ + GBENU_STATS_P4(rx_good_frames), + GBENU_STATS_P4(rx_broadcast_frames), + GBENU_STATS_P4(rx_multicast_frames), + GBENU_STATS_P4(rx_pause_frames), + GBENU_STATS_P4(rx_crc_errors), + GBENU_STATS_P4(rx_align_code_errors), + GBENU_STATS_P4(rx_oversized_frames), + GBENU_STATS_P4(rx_jabber_frames), + GBENU_STATS_P4(rx_undersized_frames), + GBENU_STATS_P4(rx_fragments), + GBENU_STATS_P4(ale_drop), + GBENU_STATS_P4(ale_overrun_drop), + GBENU_STATS_P4(rx_bytes), + GBENU_STATS_P4(tx_good_frames), + GBENU_STATS_P4(tx_broadcast_frames), + GBENU_STATS_P4(tx_multicast_frames), + GBENU_STATS_P4(tx_pause_frames), + GBENU_STATS_P4(tx_deferred_frames), + 
GBENU_STATS_P4(tx_collision_frames), + GBENU_STATS_P4(tx_single_coll_frames), + GBENU_STATS_P4(tx_mult_coll_frames), + GBENU_STATS_P4(tx_excessive_collisions), + GBENU_STATS_P4(tx_late_collisions), + GBENU_STATS_P4(rx_ipg_error), + GBENU_STATS_P4(tx_carrier_sense_errors), + GBENU_STATS_P4(tx_bytes), + GBENU_STATS_P4(tx_64B_frames), + GBENU_STATS_P4(tx_65_to_127B_frames), + GBENU_STATS_P4(tx_128_to_255B_frames), + GBENU_STATS_P4(tx_256_to_511B_frames), + GBENU_STATS_P4(tx_512_to_1023B_frames), + GBENU_STATS_P4(tx_1024B_frames), + GBENU_STATS_P4(net_bytes), + GBENU_STATS_P4(rx_bottom_fifo_drop), + GBENU_STATS_P4(rx_port_mask_drop), + GBENU_STATS_P4(rx_top_fifo_drop), + GBENU_STATS_P4(ale_rate_limit_drop), + GBENU_STATS_P4(ale_vid_ingress_drop), + GBENU_STATS_P4(ale_da_eq_sa_drop), + GBENU_STATS_P4(ale_unknown_ucast), + GBENU_STATS_P4(ale_unknown_ucast_bytes), + GBENU_STATS_P4(ale_unknown_mcast), + GBENU_STATS_P4(ale_unknown_mcast_bytes), + GBENU_STATS_P4(ale_unknown_bcast), + GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(tx_mem_protect_err), + /* GBENU Module 5 */ + GBENU_STATS_P5(rx_good_frames), + GBENU_STATS_P5(rx_broadcast_frames), + GBENU_STATS_P5(rx_multicast_frames), + GBENU_STATS_P5(rx_pause_frames), + GBENU_STATS_P5(rx_crc_errors), + GBENU_STATS_P5(rx_align_code_errors), + GBENU_STATS_P5(rx_oversized_frames), + GBENU_STATS_P5(rx_jabber_frames), + GBENU_STATS_P5(rx_undersized_frames), + GBENU_STATS_P5(rx_fragments), + GBENU_STATS_P5(ale_drop), + GBENU_STATS_P5(ale_overrun_drop), + GBENU_STATS_P5(rx_bytes), + GBENU_STATS_P5(tx_good_frames), + GBENU_STATS_P5(tx_broadcast_frames), + GBENU_STATS_P5(tx_multicast_frames), + GBENU_STATS_P5(tx_pause_frames), + GBENU_STATS_P5(tx_deferred_frames), + GBENU_STATS_P5(tx_collision_frames), + GBENU_STATS_P5(tx_single_coll_frames), + GBENU_STATS_P5(tx_mult_coll_frames), + GBENU_STATS_P5(tx_excessive_collisions), + GBENU_STATS_P5(tx_late_collisions), + GBENU_STATS_P5(rx_ipg_error), + GBENU_STATS_P5(tx_carrier_sense_errors), + GBENU_STATS_P5(tx_bytes), + GBENU_STATS_P5(tx_64B_frames), + GBENU_STATS_P5(tx_65_to_127B_frames), + GBENU_STATS_P5(tx_128_to_255B_frames), + GBENU_STATS_P5(tx_256_to_511B_frames), + GBENU_STATS_P5(tx_512_to_1023B_frames), + GBENU_STATS_P5(tx_1024B_frames), + GBENU_STATS_P5(net_bytes), + GBENU_STATS_P5(rx_bottom_fifo_drop), + GBENU_STATS_P5(rx_port_mask_drop), + GBENU_STATS_P5(rx_top_fifo_drop), + GBENU_STATS_P5(ale_rate_limit_drop), + GBENU_STATS_P5(ale_vid_ingress_drop), + GBENU_STATS_P5(ale_da_eq_sa_drop), + GBENU_STATS_P5(ale_unknown_ucast), + GBENU_STATS_P5(ale_unknown_ucast_bytes), + GBENU_STATS_P5(ale_unknown_mcast), + GBENU_STATS_P5(ale_unknown_mcast_bytes), + GBENU_STATS_P5(ale_unknown_bcast), + GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(tx_mem_protect_err), + /* GBENU Module 6 */ + GBENU_STATS_P6(rx_good_frames), + GBENU_STATS_P6(rx_broadcast_frames), + GBENU_STATS_P6(rx_multicast_frames), + GBENU_STATS_P6(rx_pause_frames), + GBENU_STATS_P6(rx_crc_errors), + GBENU_STATS_P6(rx_align_code_errors), + GBENU_STATS_P6(rx_oversized_frames), + GBENU_STATS_P6(rx_jabber_frames), + GBENU_STATS_P6(rx_undersized_frames), + GBENU_STATS_P6(rx_fragments), + GBENU_STATS_P6(ale_drop), + GBENU_STATS_P6(ale_overrun_drop), + GBENU_STATS_P6(rx_bytes), + GBENU_STATS_P6(tx_good_frames), + GBENU_STATS_P6(tx_broadcast_frames), + GBENU_STATS_P6(tx_multicast_frames), + GBENU_STATS_P6(tx_pause_frames), + GBENU_STATS_P6(tx_deferred_frames), + GBENU_STATS_P6(tx_collision_frames), + GBENU_STATS_P6(tx_single_coll_frames), + 
GBENU_STATS_P6(tx_mult_coll_frames), + GBENU_STATS_P6(tx_excessive_collisions), + GBENU_STATS_P6(tx_late_collisions), + GBENU_STATS_P6(rx_ipg_error), + GBENU_STATS_P6(tx_carrier_sense_errors), + GBENU_STATS_P6(tx_bytes), + GBENU_STATS_P6(tx_64B_frames), + GBENU_STATS_P6(tx_65_to_127B_frames), + GBENU_STATS_P6(tx_128_to_255B_frames), + GBENU_STATS_P6(tx_256_to_511B_frames), + GBENU_STATS_P6(tx_512_to_1023B_frames), + GBENU_STATS_P6(tx_1024B_frames), + GBENU_STATS_P6(net_bytes), + GBENU_STATS_P6(rx_bottom_fifo_drop), + GBENU_STATS_P6(rx_port_mask_drop), + GBENU_STATS_P6(rx_top_fifo_drop), + GBENU_STATS_P6(ale_rate_limit_drop), + GBENU_STATS_P6(ale_vid_ingress_drop), + GBENU_STATS_P6(ale_da_eq_sa_drop), + GBENU_STATS_P6(ale_unknown_ucast), + GBENU_STATS_P6(ale_unknown_ucast_bytes), + GBENU_STATS_P6(ale_unknown_mcast), + GBENU_STATS_P6(ale_unknown_mcast_bytes), + GBENU_STATS_P6(ale_unknown_bcast), + GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(tx_mem_protect_err), + /* GBENU Module 7 */ + GBENU_STATS_P7(rx_good_frames), + GBENU_STATS_P7(rx_broadcast_frames), + GBENU_STATS_P7(rx_multicast_frames), + GBENU_STATS_P7(rx_pause_frames), + GBENU_STATS_P7(rx_crc_errors), + GBENU_STATS_P7(rx_align_code_errors), + GBENU_STATS_P7(rx_oversized_frames), + GBENU_STATS_P7(rx_jabber_frames), + GBENU_STATS_P7(rx_undersized_frames), + GBENU_STATS_P7(rx_fragments), + GBENU_STATS_P7(ale_drop), + GBENU_STATS_P7(ale_overrun_drop), + GBENU_STATS_P7(rx_bytes), + GBENU_STATS_P7(tx_good_frames), + GBENU_STATS_P7(tx_broadcast_frames), + GBENU_STATS_P7(tx_multicast_frames), + GBENU_STATS_P7(tx_pause_frames), + GBENU_STATS_P7(tx_deferred_frames), + GBENU_STATS_P7(tx_collision_frames), + GBENU_STATS_P7(tx_single_coll_frames), + GBENU_STATS_P7(tx_mult_coll_frames), + GBENU_STATS_P7(tx_excessive_collisions), + GBENU_STATS_P7(tx_late_collisions), + GBENU_STATS_P7(rx_ipg_error), + GBENU_STATS_P7(tx_carrier_sense_errors), + GBENU_STATS_P7(tx_bytes), + GBENU_STATS_P7(tx_64B_frames), + GBENU_STATS_P7(tx_65_to_127B_frames), + GBENU_STATS_P7(tx_128_to_255B_frames), + GBENU_STATS_P7(tx_256_to_511B_frames), + GBENU_STATS_P7(tx_512_to_1023B_frames), + GBENU_STATS_P7(tx_1024B_frames), + GBENU_STATS_P7(net_bytes), + GBENU_STATS_P7(rx_bottom_fifo_drop), + GBENU_STATS_P7(rx_port_mask_drop), + GBENU_STATS_P7(rx_top_fifo_drop), + GBENU_STATS_P7(ale_rate_limit_drop), + GBENU_STATS_P7(ale_vid_ingress_drop), + GBENU_STATS_P7(ale_da_eq_sa_drop), + GBENU_STATS_P7(ale_unknown_ucast), + GBENU_STATS_P7(ale_unknown_ucast_bytes), + GBENU_STATS_P7(ale_unknown_mcast), + GBENU_STATS_P7(ale_unknown_mcast_bytes), + GBENU_STATS_P7(ale_unknown_bcast), + GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(tx_mem_protect_err), + /* GBENU Module 8 */ + GBENU_STATS_P8(rx_good_frames), + GBENU_STATS_P8(rx_broadcast_frames), + GBENU_STATS_P8(rx_multicast_frames), + GBENU_STATS_P8(rx_pause_frames), + GBENU_STATS_P8(rx_crc_errors), + GBENU_STATS_P8(rx_align_code_errors), + GBENU_STATS_P8(rx_oversized_frames), + GBENU_STATS_P8(rx_jabber_frames), + GBENU_STATS_P8(rx_undersized_frames), + GBENU_STATS_P8(rx_fragments), + GBENU_STATS_P8(ale_drop), + GBENU_STATS_P8(ale_overrun_drop), + GBENU_STATS_P8(rx_bytes), + GBENU_STATS_P8(tx_good_frames), + GBENU_STATS_P8(tx_broadcast_frames), + GBENU_STATS_P8(tx_multicast_frames), + GBENU_STATS_P8(tx_pause_frames), + GBENU_STATS_P8(tx_deferred_frames), + GBENU_STATS_P8(tx_collision_frames), + GBENU_STATS_P8(tx_single_coll_frames), + GBENU_STATS_P8(tx_mult_coll_frames), + GBENU_STATS_P8(tx_excessive_collisions), 
+ GBENU_STATS_P8(tx_late_collisions), + GBENU_STATS_P8(rx_ipg_error), + GBENU_STATS_P8(tx_carrier_sense_errors), + GBENU_STATS_P8(tx_bytes), + GBENU_STATS_P8(tx_64B_frames), + GBENU_STATS_P8(tx_65_to_127B_frames), + GBENU_STATS_P8(tx_128_to_255B_frames), + GBENU_STATS_P8(tx_256_to_511B_frames), + GBENU_STATS_P8(tx_512_to_1023B_frames), + GBENU_STATS_P8(tx_1024B_frames), + GBENU_STATS_P8(net_bytes), + GBENU_STATS_P8(rx_bottom_fifo_drop), + GBENU_STATS_P8(rx_port_mask_drop), + GBENU_STATS_P8(rx_top_fifo_drop), + GBENU_STATS_P8(ale_rate_limit_drop), + GBENU_STATS_P8(ale_vid_ingress_drop), + GBENU_STATS_P8(ale_da_eq_sa_drop), + GBENU_STATS_P8(ale_unknown_ucast), + GBENU_STATS_P8(ale_unknown_ucast_bytes), + GBENU_STATS_P8(ale_unknown_mcast), + GBENU_STATS_P8(ale_unknown_mcast_bytes), + GBENU_STATS_P8(ale_unknown_bcast), + GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(tx_mem_protect_err), +}; + +#define XGBE_STATS0_INFO(field) \ +{ \ + "GBE_0:"#field, XGBE_STATS0_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS1_INFO(field) \ +{ \ + "GBE_1:"#field, XGBE_STATS1_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS2_INFO(field) \ +{ \ + "GBE_2:"#field, XGBE_STATS2_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} static const struct netcp_ethtool_stat xgbe10_et_stats[] = { /* GBE module 0 */ - {XGBE_STATS0_INFO(rx_good_frames)}, - {XGBE_STATS0_INFO(rx_broadcast_frames)}, - {XGBE_STATS0_INFO(rx_multicast_frames)}, - {XGBE_STATS0_INFO(rx_oversized_frames)}, - {XGBE_STATS0_INFO(rx_undersized_frames)}, - {XGBE_STATS0_INFO(overrun_type4)}, - {XGBE_STATS0_INFO(overrun_type5)}, - {XGBE_STATS0_INFO(rx_bytes)}, - {XGBE_STATS0_INFO(tx_good_frames)}, - {XGBE_STATS0_INFO(tx_broadcast_frames)}, - {XGBE_STATS0_INFO(tx_multicast_frames)}, - {XGBE_STATS0_INFO(tx_bytes)}, - {XGBE_STATS0_INFO(tx_64byte_frames)}, - {XGBE_STATS0_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS0_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS0_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS0_INFO(tx_1024byte_frames)}, - {XGBE_STATS0_INFO(net_bytes)}, - {XGBE_STATS0_INFO(rx_sof_overruns)}, - {XGBE_STATS0_INFO(rx_mof_overruns)}, - {XGBE_STATS0_INFO(rx_dma_overruns)}, + XGBE_STATS0_INFO(rx_good_frames), + XGBE_STATS0_INFO(rx_broadcast_frames), + XGBE_STATS0_INFO(rx_multicast_frames), + XGBE_STATS0_INFO(rx_oversized_frames), + XGBE_STATS0_INFO(rx_undersized_frames), + XGBE_STATS0_INFO(overrun_type4), + XGBE_STATS0_INFO(overrun_type5), + XGBE_STATS0_INFO(rx_bytes), + XGBE_STATS0_INFO(tx_good_frames), + XGBE_STATS0_INFO(tx_broadcast_frames), + XGBE_STATS0_INFO(tx_multicast_frames), + XGBE_STATS0_INFO(tx_bytes), + XGBE_STATS0_INFO(tx_64byte_frames), + XGBE_STATS0_INFO(tx_65_to_127byte_frames), + XGBE_STATS0_INFO(tx_128_to_255byte_frames), + XGBE_STATS0_INFO(tx_256_to_511byte_frames), + XGBE_STATS0_INFO(tx_512_to_1023byte_frames), + XGBE_STATS0_INFO(tx_1024byte_frames), + XGBE_STATS0_INFO(net_bytes), + XGBE_STATS0_INFO(rx_sof_overruns), + XGBE_STATS0_INFO(rx_mof_overruns), + XGBE_STATS0_INFO(rx_dma_overruns), /* XGBE module 1 */ - {XGBE_STATS1_INFO(rx_good_frames)}, - {XGBE_STATS1_INFO(rx_broadcast_frames)}, - {XGBE_STATS1_INFO(rx_multicast_frames)}, - {XGBE_STATS1_INFO(rx_pause_frames)}, - {XGBE_STATS1_INFO(rx_crc_errors)}, - {XGBE_STATS1_INFO(rx_align_code_errors)}, - 
{XGBE_STATS1_INFO(rx_oversized_frames)}, - {XGBE_STATS1_INFO(rx_jabber_frames)}, - {XGBE_STATS1_INFO(rx_undersized_frames)}, - {XGBE_STATS1_INFO(rx_fragments)}, - {XGBE_STATS1_INFO(overrun_type4)}, - {XGBE_STATS1_INFO(overrun_type5)}, - {XGBE_STATS1_INFO(rx_bytes)}, - {XGBE_STATS1_INFO(tx_good_frames)}, - {XGBE_STATS1_INFO(tx_broadcast_frames)}, - {XGBE_STATS1_INFO(tx_multicast_frames)}, - {XGBE_STATS1_INFO(tx_pause_frames)}, - {XGBE_STATS1_INFO(tx_deferred_frames)}, - {XGBE_STATS1_INFO(tx_collision_frames)}, - {XGBE_STATS1_INFO(tx_single_coll_frames)}, - {XGBE_STATS1_INFO(tx_mult_coll_frames)}, - {XGBE_STATS1_INFO(tx_excessive_collisions)}, - {XGBE_STATS1_INFO(tx_late_collisions)}, - {XGBE_STATS1_INFO(tx_underrun)}, - {XGBE_STATS1_INFO(tx_carrier_sense_errors)}, - {XGBE_STATS1_INFO(tx_bytes)}, - {XGBE_STATS1_INFO(tx_64byte_frames)}, - {XGBE_STATS1_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS1_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS1_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS1_INFO(tx_1024byte_frames)}, - {XGBE_STATS1_INFO(net_bytes)}, - {XGBE_STATS1_INFO(rx_sof_overruns)}, - {XGBE_STATS1_INFO(rx_mof_overruns)}, - {XGBE_STATS1_INFO(rx_dma_overruns)}, + XGBE_STATS1_INFO(rx_good_frames), + XGBE_STATS1_INFO(rx_broadcast_frames), + XGBE_STATS1_INFO(rx_multicast_frames), + XGBE_STATS1_INFO(rx_pause_frames), + XGBE_STATS1_INFO(rx_crc_errors), + XGBE_STATS1_INFO(rx_align_code_errors), + XGBE_STATS1_INFO(rx_oversized_frames), + XGBE_STATS1_INFO(rx_jabber_frames), + XGBE_STATS1_INFO(rx_undersized_frames), + XGBE_STATS1_INFO(rx_fragments), + XGBE_STATS1_INFO(overrun_type4), + XGBE_STATS1_INFO(overrun_type5), + XGBE_STATS1_INFO(rx_bytes), + XGBE_STATS1_INFO(tx_good_frames), + XGBE_STATS1_INFO(tx_broadcast_frames), + XGBE_STATS1_INFO(tx_multicast_frames), + XGBE_STATS1_INFO(tx_pause_frames), + XGBE_STATS1_INFO(tx_deferred_frames), + XGBE_STATS1_INFO(tx_collision_frames), + XGBE_STATS1_INFO(tx_single_coll_frames), + XGBE_STATS1_INFO(tx_mult_coll_frames), + XGBE_STATS1_INFO(tx_excessive_collisions), + XGBE_STATS1_INFO(tx_late_collisions), + XGBE_STATS1_INFO(tx_underrun), + XGBE_STATS1_INFO(tx_carrier_sense_errors), + XGBE_STATS1_INFO(tx_bytes), + XGBE_STATS1_INFO(tx_64byte_frames), + XGBE_STATS1_INFO(tx_65_to_127byte_frames), + XGBE_STATS1_INFO(tx_128_to_255byte_frames), + XGBE_STATS1_INFO(tx_256_to_511byte_frames), + XGBE_STATS1_INFO(tx_512_to_1023byte_frames), + XGBE_STATS1_INFO(tx_1024byte_frames), + XGBE_STATS1_INFO(net_bytes), + XGBE_STATS1_INFO(rx_sof_overruns), + XGBE_STATS1_INFO(rx_mof_overruns), + XGBE_STATS1_INFO(rx_dma_overruns), /* XGBE module 2 */ - {XGBE_STATS2_INFO(rx_good_frames)}, - {XGBE_STATS2_INFO(rx_broadcast_frames)}, - {XGBE_STATS2_INFO(rx_multicast_frames)}, - {XGBE_STATS2_INFO(rx_pause_frames)}, - {XGBE_STATS2_INFO(rx_crc_errors)}, - {XGBE_STATS2_INFO(rx_align_code_errors)}, - {XGBE_STATS2_INFO(rx_oversized_frames)}, - {XGBE_STATS2_INFO(rx_jabber_frames)}, - {XGBE_STATS2_INFO(rx_undersized_frames)}, - {XGBE_STATS2_INFO(rx_fragments)}, - {XGBE_STATS2_INFO(overrun_type4)}, - {XGBE_STATS2_INFO(overrun_type5)}, - {XGBE_STATS2_INFO(rx_bytes)}, - {XGBE_STATS2_INFO(tx_good_frames)}, - {XGBE_STATS2_INFO(tx_broadcast_frames)}, - {XGBE_STATS2_INFO(tx_multicast_frames)}, - {XGBE_STATS2_INFO(tx_pause_frames)}, - {XGBE_STATS2_INFO(tx_deferred_frames)}, - {XGBE_STATS2_INFO(tx_collision_frames)}, - {XGBE_STATS2_INFO(tx_single_coll_frames)}, - {XGBE_STATS2_INFO(tx_mult_coll_frames)}, - {XGBE_STATS2_INFO(tx_excessive_collisions)}, - 
{XGBE_STATS2_INFO(tx_late_collisions)}, - {XGBE_STATS2_INFO(tx_underrun)}, - {XGBE_STATS2_INFO(tx_carrier_sense_errors)}, - {XGBE_STATS2_INFO(tx_bytes)}, - {XGBE_STATS2_INFO(tx_64byte_frames)}, - {XGBE_STATS2_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS2_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS2_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS2_INFO(tx_1024byte_frames)}, - {XGBE_STATS2_INFO(net_bytes)}, - {XGBE_STATS2_INFO(rx_sof_overruns)}, - {XGBE_STATS2_INFO(rx_mof_overruns)}, - {XGBE_STATS2_INFO(rx_dma_overruns)}, + XGBE_STATS2_INFO(rx_good_frames), + XGBE_STATS2_INFO(rx_broadcast_frames), + XGBE_STATS2_INFO(rx_multicast_frames), + XGBE_STATS2_INFO(rx_pause_frames), + XGBE_STATS2_INFO(rx_crc_errors), + XGBE_STATS2_INFO(rx_align_code_errors), + XGBE_STATS2_INFO(rx_oversized_frames), + XGBE_STATS2_INFO(rx_jabber_frames), + XGBE_STATS2_INFO(rx_undersized_frames), + XGBE_STATS2_INFO(rx_fragments), + XGBE_STATS2_INFO(overrun_type4), + XGBE_STATS2_INFO(overrun_type5), + XGBE_STATS2_INFO(rx_bytes), + XGBE_STATS2_INFO(tx_good_frames), + XGBE_STATS2_INFO(tx_broadcast_frames), + XGBE_STATS2_INFO(tx_multicast_frames), + XGBE_STATS2_INFO(tx_pause_frames), + XGBE_STATS2_INFO(tx_deferred_frames), + XGBE_STATS2_INFO(tx_collision_frames), + XGBE_STATS2_INFO(tx_single_coll_frames), + XGBE_STATS2_INFO(tx_mult_coll_frames), + XGBE_STATS2_INFO(tx_excessive_collisions), + XGBE_STATS2_INFO(tx_late_collisions), + XGBE_STATS2_INFO(tx_underrun), + XGBE_STATS2_INFO(tx_carrier_sense_errors), + XGBE_STATS2_INFO(tx_bytes), + XGBE_STATS2_INFO(tx_64byte_frames), + XGBE_STATS2_INFO(tx_65_to_127byte_frames), + XGBE_STATS2_INFO(tx_128_to_255byte_frames), + XGBE_STATS2_INFO(tx_256_to_511byte_frames), + XGBE_STATS2_INFO(tx_512_to_1023byte_frames), + XGBE_STATS2_INFO(tx_1024byte_frames), + XGBE_STATS2_INFO(net_bytes), + XGBE_STATS2_INFO(rx_sof_overruns), + XGBE_STATS2_INFO(rx_mof_overruns), + XGBE_STATS2_INFO(rx_dma_overruns), }; #define for_each_intf(i, priv) \ @@ -1066,9 +1796,16 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, if (!slave->open) return; - if (!SLAVE_LINK_IS_XGMII(slave)) - sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp), - sp); + if (!SLAVE_LINK_IS_XGMII(slave)) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); + else + sgmii_link_state = + netcp_sgmii_get_port_link( + gbe_dev->sgmii_port_regs, sp); + } + phy_link_state = gbe_phy_link_status(slave); link_state = phy_link_state & sgmii_link_state; @@ -1137,6 +1874,7 @@ static int gbe_port_reset(struct gbe_slave *slave) static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, int max_rx_len) { + void __iomem *rx_maxlen_reg; u32 xgmii_mode; if (max_rx_len > NETCP_MAX_FRAME_SIZE) @@ -1150,7 +1888,12 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); } - writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen)); + if (IS_SS_ID_MU(gbe_dev)) + rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen); + else + rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen); + + writel(max_rx_len, rx_maxlen_reg); writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); } @@ -1242,6 +1985,12 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) static void gbe_init_host_port(struct gbe_priv *priv) { int bypass_en = 1; + + /* Host Tx Pri */ + if (IS_SS_ID_NU(priv)) + 
writel(HOST_TX_PRI_MAP_DEFAULT, + GBE_REG_ADDR(priv, host_port_regs, tx_pri_map)); + /* Max length register */ writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs, rx_maxlen)); @@ -1472,15 +2221,21 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg), GBE_RTL_VERSION(reg), GBE_IDENT(reg)); + /* For 10G and on NetCP 1.5, use directed to port */ + if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) + gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; + if (gbe_dev->enable_ale) - gbe_intf->tx_pipe.dma_psflags = 0; + gbe_intf->tx_pipe.switch_to_port = 0; else - gbe_intf->tx_pipe.dma_psflags = port_num; + gbe_intf->tx_pipe.switch_to_port = port_num; - dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n", + dev_dbg(gbe_dev->dev, + "opened TX channel %s: %p with to port %d, flags %d\n", gbe_intf->tx_pipe.dma_chan_name, gbe_intf->tx_pipe.dma_channel, - gbe_intf->tx_pipe.dma_psflags); + gbe_intf->tx_pipe.switch_to_port, + gbe_intf->tx_pipe.flags); gbe_slave_stop(gbe_intf); @@ -1491,8 +2246,8 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); /* All statistics enabled and STAT AB visible by default */ - writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs, - stat_port_en)); + writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, + stat_port_en)); ret = gbe_slave_open(gbe_intf); if (ret) @@ -1529,6 +2284,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, { int port_reg_num; u32 port_reg_ofs, emac_reg_ofs; + u32 port_reg_blk_sz, emac_reg_blk_sz; if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { dev_err(gbe_dev->dev, "missing slave-port parameter\n"); @@ -1560,23 +2316,29 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, } else { port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; } + emac_reg_ofs = GBE13_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; + } else if (IS_SS_ID_MU(gbe_dev)) { + port_reg_ofs = GBENU_SLAVE_PORT_OFFSET; + emac_reg_ofs = GBENU_EMAC_OFFSET; + port_reg_blk_sz = 0x1000; + emac_reg_blk_sz = 0x1000; } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; + emac_reg_ofs = XGBE10_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; } else { dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", gbe_dev->ss_version); return -EINVAL; } - if (gbe_dev->ss_version == GBE_SS_VERSION_14) - emac_reg_ofs = GBE13_EMAC_OFFSET; - else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) - emac_reg_ofs = XGBE10_EMAC_OFFSET; - - slave->port_regs = gbe_dev->ss_regs + port_reg_ofs + - (0x30 * port_reg_num); - slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs + - (0x40 * slave->slave_num); + slave->port_regs = gbe_dev->switch_regs + port_reg_ofs + + (port_reg_blk_sz * port_reg_num); + slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + + (emac_reg_blk_sz * slave->slave_num); if (gbe_dev->ss_version == GBE_SS_VERSION_14) { /* Initialize slave port register offsets */ @@ -1595,6 +2357,23 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, GBE_SET_REG_OFS(slave, emac_regs, soft_reset); GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); + } else if (IS_SS_ID_MU(gbe_dev)) { + /* Initialize slave port register offsets */ + GBENU_SET_REG_OFS(slave, port_regs, port_vlan); + GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBENU_SET_REG_OFS(slave, 
port_regs, sa_lo); + GBENU_SET_REG_OFS(slave, port_regs, sa_hi); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); + GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype); + GBENU_SET_REG_OFS(slave, port_regs, ts_vlan); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2); + GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen); + + /* Initialize EMAC register offsets */ + GBENU_SET_REG_OFS(slave, emac_regs, mac_control); + GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); + } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { /* Initialize slave port register offsets */ XGBE_SET_REG_OFS(slave, port_regs, port_vlan); @@ -1654,6 +2433,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, mac_phy_link = true; slave->open = true; + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + break; } /* of_phy_connect() is needed only for MAC-PHY interface */ @@ -1724,24 +2505,41 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, void __iomem *regs; int ret, i; - ret = of_address_to_resource(node, 0, &res); + ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n", - node->name); + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) ss address at %d\n", + node->name, XGBE_SS_REG_INDEX); return ret; } regs = devm_ioremap_resource(gbe_dev->dev, &res); if (IS_ERR(regs)) { - dev_err(gbe_dev->dev, "Failed to map xgbe register base\n"); + dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n"); return PTR_ERR(regs); } gbe_dev->ss_regs = regs; + ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) sm address at %d\n", + node->name, XGBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n", - node->name); + dev_err(gbe_dev->dev, + "Can't xlate xgbe serdes of node(%s) address at %d\n", + node->name, XGBE_SERDES_REG_INDEX); return ret; } @@ -1753,9 +2551,9 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->xgbe_serdes_regs = regs; gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - XGBE10_NUM_STAT_ENTRIES * - (XGBE10_NUM_SLAVES + 1) * sizeof(u64), - GFP_KERNEL); + XGBE10_NUM_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; @@ -1764,19 +2562,19 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->ss_version = XGBE_SS_VERSION_10; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + XGBE10_SGMII_MODULE_OFFSET; - gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET; gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET; - for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++) - gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs + + for (i = 0; i < gbe_dev->max_num_ports; i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i); - gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET; - gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS; + gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET; + 
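+/* Worked example for the new stats_en_mask (set a few lines below, values
+ * taken from this series): the "ti,netcp-xgbe" binding sets
+ * max_num_slaves = 2, so max_num_ports = 3 and
+ * stats_en_mask = (1 << 3) - 1 = 0x7 -- the host port plus both slave
+ * ports have statistics enabled. GBE13 instead keeps the fixed
+ * GBE13_REG_VAL_STAT_ENABLE_ALL (0xff) mask.
+ */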
gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = XGBE10_HOST_PORT_NUM; gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; gbe_dev->et_stats = xgbe10_et_stats; gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -1803,10 +2601,11 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, void __iomem *regs; int ret; - ret = of_address_to_resource(node, 0, &res); + ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n", - node->name); + dev_err(gbe_dev->dev, + "Can't translate of node(%s) of gbe ss address at %d\n", + node->name, GBE_SS_REG_INDEX); return ret; } @@ -1823,34 +2622,67 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, struct device_node *node) { + struct resource res; void __iomem *regs; - int i; + int i, ret; + + ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SGMII34_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe sgmii port34 register base\n"); + return PTR_ERR(regs); + } + gbe_dev->sgmii_port34_regs = regs; + + ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, GBE13_NUM_HW_STAT_ENTRIES * - GBE13_NUM_SLAVES * sizeof(u64), + gbe_dev->max_num_slaves * sizeof(u64), GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } - regs = gbe_dev->ss_regs; - gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET; - gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET; - gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET; - gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET; + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; - for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++) - gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET + - (GBE_HW_STATS_REG_MAP_SZ * i); + for (i = 0; i < gbe_dev->max_num_slaves; i++) { + gbe_dev->hw_stats_regs[i] = + gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + + (GBE_HW_STATS_REG_MAP_SZ * i); + } - gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET; - gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS; + gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBE13_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; gbe_dev->et_stats = gbe13_et_stats; gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; /* Subsystem registers */ GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -1869,6 +2701,80 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, return 0; } +static int set_gbenu_ethss_priv(struct gbe_priv 
*gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int i, ret; + + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, + GBENU_NUM_HW_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); + if (!gbe_dev->hw_stats) { + dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbenu node(%s) addr at index %d\n", + node->name, GBENU_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbenu switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; + + for (i = 0; i < (gbe_dev->max_num_ports); i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + + GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i); + + gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; + gbe_dev->host_port = GBENU_HOST_PORT_NUM; + gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; + gbe_dev->et_stats = gbenu_et_stats; + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + + /* Subsystem registers */ + GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + + /* Switch module registers */ + GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, control); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype); + + /* Host port registers */ + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); + + /* For NU only. 2U does not need tx_pri_map. 
@@ -1963,6 +2892,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 			continue;
 		}
 		gbe_dev->num_slaves++;
+		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+			break;
 	}

 	if (!gbe_dev->num_slaves)
@@ -1971,7 +2902,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 	/* Initialize Secondary slave ports */
 	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
 	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
-	if (secondary_ports)
+	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
 		init_secondary_ports(gbe_dev, secondary_ports);
 	of_node_put(secondary_ports);

diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index bea8cd2bb56c..a3f7610002aa 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -838,7 +838,8 @@ static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return ret;
 }

-static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
+			     struct timespec64 *ts)
 {
 	int ret = 0;
 	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -850,7 +851,7 @@ static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 }

 static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
-			     const struct timespec *ts)
+			     const struct timespec64 *ts)
 {
 	int ret = 0;
 	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -876,8 +877,8 @@ static struct ptp_clock_info ptp_mpipe_caps = {
 	.pps = 0,
 	.adjfreq = ptp_mpipe_adjfreq,
 	.adjtime = ptp_mpipe_adjtime,
-	.gettime = ptp_mpipe_gettime,
-	.settime = ptp_mpipe_settime,
+	.gettime64 = ptp_mpipe_gettime,
+	.settime64 = ptp_mpipe_settime,
 	.enable = ptp_mpipe_enable,
 };

@@ -1122,7 +1123,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			addr + i * sizeof(struct tile_net_comps);

 		/* If this is a network cpu, create an iqueue. */
-		if (cpu_isset(cpu, network_cpus_map)) {
+		if (cpumask_test_cpu(cpu, &network_cpus_map)) {
 			order = get_order(NOTIF_RING_SIZE);
 			page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
 			if (page == NULL) {
@@ -1298,7 +1299,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	int first_ring, ring;
 	int instance = mpipe_instance(dev);
 	struct mpipe_data *md = &mpipe_data[instance];
-	int network_cpus_count = cpus_weight(network_cpus_map);
+	int network_cpus_count = cpumask_weight(&network_cpus_map);

 	if (!hash_default) {
 		netdev_err(dev, "Networking requires hash_default!\n");
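[Editor's note] The tilegx hunks are part of a tree-wide PTP conversion from gettime/settime over struct timespec to the y2038-safe gettime64/settime64 over struct timespec64. A minimal sketch of a converted clock, with a hypothetical software counter standing in for real device register I/O:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>
#include <linux/types.h>

static u64 my_ns;	/* hypothetical clock state; device registers in real life */

static int my_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	/* ns_to_timespec64() replaces open-coded div_u64_rem() splitting. */
	*ts = ns_to_timespec64(my_ns);
	return 0;
}

static int my_ptp_settime(struct ptp_clock_info *ptp,
			  const struct timespec64 *ts)
{
	/* timespec64_to_ns() replaces the sec * 1e9 + nsec arithmetic. */
	my_ns = timespec64_to_ns(ts);
	return 0;
}

static struct ptp_clock_info my_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "my_ptp",
	.gettime64	= my_ptp_gettime,	/* was .gettime */
	.settime64	= my_ptp_settime,	/* was .settime */
};

The same file also drops the old cpu_isset()/cpus_weight() macros in favor of cpumask_test_cpu()/cpumask_weight(), which take a pointer to the struct cpumask rather than the bare map.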
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 74acb5cf6099..5d244b6b5e3a 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -5,7 +5,7 @@ config NET_VENDOR_TOSHIBA
 	bool "Toshiba devices"
 	default y
-	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
+	depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -42,7 +42,7 @@ config GELIC_WIRELESS

 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
-	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
+	depends on PCI && PPC_IBM_CELL_BLADE
 	select FW_LOADER
 	select SUNGEM_PHY
 	---help---
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index bb7992804664..ac62a5e248b0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1065,7 +1065,7 @@ refill:

 	/*
 	 * this call can fail, but for now, just leave this
-	 * decriptor without skb
+	 * descriptor without skb
 	 */
 	gelic_descr_prepare_rx(card, descr);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 0a7f2e77557f..13214a6492ac 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev,
 	} else {
 		pr_debug("%s: clear bssid\n", __func__);
 		clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
-		memset(wl->bssid, 0, ETH_ALEN);
+		eth_zero_addr(wl->bssid);
 	}
 	spin_unlock_irqrestore(&wl->lock, irqflag);
 	pr_debug("%s: ->\n", __func__);
@@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
 		memcpy(data->ap_addr.sa_data, wl->active_bssid,
 		       ETH_ALEN);
 	} else
-		memset(data->ap_addr.sa_data, 0, ETH_ALEN);
+		eth_zero_addr(data->ap_addr.sa_data);

 	spin_unlock_irqrestore(&wl->lock, irqflag);
 	mutex_unlock(&wl->assoc_stat_lock);
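[Editor's note] The gelic wireless hunks (and the ixp4xx one at the end of this diff) replace an open-coded memset() of a MAC address with the eth_zero_addr() helper. The two are byte-for-byte equivalent; the helper just documents intent. A trivial sketch with a hypothetical wrapper:

#include <linux/etherdevice.h>

static void my_clear_bssid(u8 *bssid)
{
	/* Same effect as memset(bssid, 0, ETH_ALEN). */
	eth_zero_addr(bssid);
}

eth_broadcast_addr() is the matching helper for the all-ones fill.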
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 17e276651601..de2850497c09 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32;
 /* Operational parameters that are set at compile time. */

 /* Keep the ring sizes a power of two for compile efficiency.
-   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-   Making the Tx ring too large decreases the effectiveness of channel
-   bonding and packet priority.
-   There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
-#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * With BQL support, we can increase TX ring safely.
+ * There are no ill effects from too-large receive rings.
+ */
+#define TX_RING_SIZE	64
+#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
 #define RX_RING_SIZE	64

 /* Operational parameters that usually are not changed. */
@@ -286,7 +288,7 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
  * The .data field is currently only used to store quirks
  */
 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
-static struct of_device_id rhine_of_tbl[] = {
+static const struct of_device_id rhine_of_tbl[] = {
 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
 	{ }	/* terminate list */
 };
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev)
 	}
 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

+	netdev_reset_queue(dev);
 }

 static void free_tbufs(struct net_device* dev)
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	else
 		rp->tx_ring[entry].tx_status = 0;

+	netdev_sent_queue(dev, skb->len);
 	/* lock eth irq */
 	wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	struct sk_buff *skb;

 	/* find and cleanup dirty tx descriptors */
 	while (rp->dirty_tx != rp->cur_tx) {
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev)
 			  entry, txstatus);
 		if (txstatus & DescOwn)
 			break;
+		skb = rp->tx_skbuff[entry];
 		if (txstatus & 0x8000) {
 			netif_dbg(rp, tx_done, dev,
 				  "Transmit error, Tx status %08x\n", txstatus);
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev)
 				   (txstatus >> 3) & 0xF, txstatus & 0xF);

 			u64_stats_update_begin(&rp->tx_stats.syncp);
-			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+			rp->tx_stats.bytes += skb->len;
 			rp->tx_stats.packets++;
 			u64_stats_update_end(&rp->tx_stats.syncp);
 		}
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev)
 		if (rp->tx_skbuff_dma[entry]) {
 			dma_unmap_single(hwdev,
 					 rp->tx_skbuff_dma[entry],
-					 rp->tx_skbuff[entry]->len,
+					 skb->len,
 					 DMA_TO_DEVICE);
 		}
-		dev_consume_skb_any(rp->tx_skbuff[entry]);
+		bytes_compl += skb->len;
+		pkts_compl++;
+		dev_consume_skb_any(skb);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
+
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
 		netif_wake_queue(dev);
 }
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index c20206f83cc1..ae68afd50a15 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
 *	Describe the OF device identifiers that we support in this
 *	device driver. Used for devicetree nodes.
 */
-static struct of_device_id velocity_of_ids[] = {
+static const struct of_device_id velocity_of_ids[] = {
	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
	{ /* Sentinel */ },
 };
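[Editor's note] The via-rhine hunks add Byte Queue Limits (BQL) accounting, which is what makes the TX_RING_SIZE bump from 16 to 64 safe: BQL caps the bytes in flight, so a deeper ring no longer means a deeper standing queue. The pattern is three calls in three places; a sketch with hypothetical driver helpers (netdev_sent_queue(), netdev_completed_queue(), netdev_reset_queue(), and dev_consume_skb_any() are the real APIs):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* xmit path: account bytes as they are queued to hardware. */
static void my_queue_skb(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
	/* ... fill descriptor, hand ownership to the NIC ... */
}

/* completion path: batch totals, report once per cleanup pass. */
static void my_tx_clean(struct net_device *dev, struct sk_buff **done, int n)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Read len before freeing, as the rhine_tx() hunk does
		 * with its new skb local.
		 */
		bytes_compl += done[i]->len;
		pkts_compl++;
		dev_consume_skb_any(done[i]);
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* ring (re)initialization path: BQL state must match an empty ring. */
static void my_init_ring(struct net_device *dev)
{
	netdev_reset_queue(dev);
}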
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..8b282d0b169c 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -56,7 +56,7 @@ MODULE_LICENSE("GPL");

 #define W5100_S0_REGS		0x0400
 #define W5100_S0_MR		0x0400 /* S0 Mode Register */
-#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF	  0x44 /* MAC RAW mode (filtered) */
 #define W5100_S0_CR		0x0401 /* S0 Command Register */
 #define   S0_CR_OPEN		  0x01 /* OPEN command */
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
 	}

 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5100_write(priv, W5100_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}

 	return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..8da7b930ff59 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
 #define   IDR_W5300		  0x5300  /* =0x5300 for WIZnet W5300 */
 #define W5300_S0_MR		0x0200	/* S0 Mode Register */
 #define   S0_MR_CLOSED		  0x0000  /* Close mode */
-#define   S0_MR_MACRAW		  0x0004  /* MAC RAW mode (promiscous) */
+#define   S0_MR_MACRAW		  0x0004  /* MAC RAW mode (promiscuous) */
 #define   S0_MR_MACRAW_MF	  0x0044  /* MAC RAW mode (filtered) */
 #define W5300_S0_CR		0x0202	/* S0 Command Register */
 #define   S0_CR_OPEN		  0x0001  /* OPEN command */
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
 	}

 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5300_write(priv, W5300_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}

 	return rx_count;
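[Editor's note] The w5100/w5300 hunks fix an ordering race: if the device IRQ is unmasked while NAPI is still marked scheduled, an interrupt arriving in that window calls napi_schedule() to no effect, and the events it signalled are never polled. The corrected shape of a poll routine, sketched for a hypothetical device (my_hw_irq_enable() stands in for the IMR write plus mmiowb()):

#include <linux/netdevice.h>

static void my_hw_irq_enable(void)
{
	/* hypothetical: unmask the device interrupt register */
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int rx_count = 0;

	/* ... receive up to budget packets, counting them in rx_count ... */

	if (rx_count < budget) {
		/* Clear NAPI_STATE_SCHED first ... */
		napi_complete(napi);
		/* ... then unmask; a fresh IRQ can now reschedule NAPI
		 * instead of being silently dropped.
		 */
		my_hw_irq_enable();
	}
	return rx_count;
}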
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index dbcbf0c5bcfa..690a4c36b316 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1157,7 +1157,7 @@ static int temac_of_remove(struct platform_device *op)
 	return 0;
 }

-static struct of_device_id temac_of_match[] = {
+static const struct of_device_id temac_of_match[] = {
 	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
 	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
 	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a6d2860b712c..28b7e7d9c272 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -48,7 +48,7 @@
 #define AXIENET_REGS_N		32

 /* Match table for of_platform binding */
-static struct of_device_id axienet_of_match[] = {
+static const struct of_device_id axienet_of_match[] = {
 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 9d4ce388510a..6008eee01a33 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1062,7 +1062,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
 	} else {
 		dev_warn(&ofdev->dev, "Parameter %s not found,"
			 "defaulting to false\n", s);
-		return 0;
+		return false;
 	}
 }

@@ -1231,7 +1231,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
 };

 /* Match table for OF platform binding */
-static struct of_device_id xemaclite_of_match[] = {
+static const struct of_device_id xemaclite_of_match[] = {
 	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
 	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
 	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f7c2e2..5138407941cf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
 	int i;
 	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

-	if (dev->flags & IFF_ALLMULTI) {
+	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
 		for (i = 0; i < ETH_ALEN; i++) {
 			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
 			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
@@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev)
 		return;
 	}

-	memset(diffs, 0, ETH_ALEN);
+	eth_zero_addr(diffs);

 	addr = NULL;
 	netdev_for_each_mc_addr(ha, dev) {