Diffstat (limited to 'drivers/net/ethernet')
346 files changed, 40290 insertions, 8403 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 4547a1b8b958..7677c745fb30 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev) } #ifdef CONFIG_EISA -static int __init el3_eisa_probe (struct device *device) +static int el3_eisa_probe(struct device *device) { short i; int ioaddr, irq, if_port; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2839af00f20c..1c5f3b273e6a 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = { }; MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); -static int __init vortex_eisa_probe(struct device *device) +static int vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 0443654f0339..c89b9aeeceb6 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -372,7 +372,7 @@ static int ax_mii_probe(struct net_device *dev) ax->phy_dev = phy_dev; netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); + phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); return 0; } @@ -627,7 +627,7 @@ static int ax_mii_init(struct net_device *dev) struct platform_device *pdev = to_platform_device(dev->dev.parent); struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); - int err, i; + int err; ax->bb_ctrl.ops = &bb_ops; ax->addr_memr = ei_local->mem + AX_MEMR; @@ -642,23 +642,12 @@ static int ax_mii_init(struct net_device *dev) snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!ax->mii_bus->irq) { - err = -ENOMEM; - goto out_free_mdio_bitbang; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - ax->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(ax->mii_bus); if (err) - goto out_free_irq; + goto out_free_mdio_bitbang; return 0; - out_free_irq: - kfree(ax->mii_bus->irq); out_free_mdio_bitbang: free_mdio_bitbang(ax->mii_bus); out: diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 31c5e476fd64..0b13af8e4070 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -122,6 +122,7 @@ config FEALNX cards. 
<http://www.myson.com.tw/> source "drivers/net/ethernet/natsemi/Kconfig" +source "drivers/net/ethernet/netronome/Kconfig" source "drivers/net/ethernet/8390/Kconfig" config NET_NETX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 071f84eb6f3f..38dc1a776a2b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ +obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 096531a73124..74139cb7f849 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -380,9 +380,8 @@ static void bfin_mac_adjust_link(struct net_device *dev) static int mii_probe(struct net_device *dev, int phy_mode) { struct bfin_mac_local *lp = netdev_priv(dev); - struct phy_device *phydev = NULL; + struct phy_device *phydev; unsigned short sysctl; - int i; u32 sclk, mdc_div; /* Enable PHY output early */ @@ -396,18 +395,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); bfin_write_EMAC_SYSCTL(sysctl); - /* search for connected PHY device */ - for (i = 0; i < PHY_MAX_ADDR; ++i) { - struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i]; - - if (!tmp_phydev) - continue; /* no PHY here... */ - - phydev = tmp_phydev; - break; /* found it */ - } - - /* now we are supposed to have a proper phydev, to attach to... */ + phydev = phy_find_first(lp->mii_bus); if (!phydev) { netdev_err(dev, "no phy device found\n"); return -ENODEV; @@ -419,7 +407,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) return -EINVAL; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &bfin_mac_adjust_link, phy_mode); if (IS_ERR(phydev)) { @@ -444,10 +432,8 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_duplex = -1; lp->phydev = phydev; - pr_info("attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq, - MDC_CLK, mdc_div, sclk/1000000); + phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + MDC_CLK, mdc_div, sclk / 1000000); return 0; } @@ -1840,12 +1826,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!miibus->irq) - goto out_err_irq_alloc; - - for (i = rc; i < PHY_MAX_ADDR; ++i) - miibus->irq[i] = PHY_POLL; rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); if (rc != mii_bus_pd->phydev_number) @@ -1864,14 +1844,12 @@ static int bfin_mii_bus_probe(struct platform_device *pdev) rc = mdiobus_register(miibus); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto out_err_mdiobus_register; + goto out_err_irq_alloc; } platform_set_drvdata(pdev, miibus); return 0; -out_err_mdiobus_register: - kfree(miibus->irq); out_err_irq_alloc: mdiobus_free(miibus); out_err_alloc: @@ -1887,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev) dev_get_platdata(&pdev->dev); mdiobus_unregister(miibus); - kfree(miibus->irq); 
mdiobus_free(miibus); peripheral_free_list(mii_bus_pd->mac_peripherals); @@ -1912,21 +1889,21 @@ static struct platform_driver bfin_mac_driver = { }, }; +static struct platform_driver * const drivers[] = { + &bfin_mii_bus_driver, + &bfin_mac_driver, +}; + static int __init bfin_mac_init(void) { - int ret; - ret = platform_driver_register(&bfin_mii_bus_driver); - if (!ret) - return platform_driver_register(&bfin_mac_driver); - return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(bfin_mac_init); static void __exit bfin_mac_cleanup(void) { - platform_driver_unregister(&bfin_mac_driver); - platform_driver_unregister(&bfin_mii_bus_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(bfin_mac_cleanup); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 20bf55dbd76f..b873531c5575 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1337,11 +1337,6 @@ static int greth_mdio_init(struct greth_private *greth) greth->mdio->write = greth_mdio_write; greth->mdio->priv = greth; - greth->mdio->irq = greth->mdio_irqs; - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - greth->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(greth->mdio); if (ret) { goto error; diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index ae16ac94daf8..92dd918e4a83 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -125,7 +125,6 @@ struct greth_private { struct phy_device *phy; struct mii_bus *mdio; - int mdio_irqs[PHY_MAX_ADDR]; unsigned int link; unsigned int speed; unsigned int duplex; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index e0f3d197e7f2..3f3bcbea15bd 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -1235,7 +1235,7 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) if (!phydev) return -EIO; - return et131x_phy_mii_read(adapter, phydev->addr, reg, value); + return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value); } static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, @@ -1462,7 +1462,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) data &= ~BMCR_PDOWN; if (down) data |= BMCR_PDOWN; - et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); + et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data); } /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ @@ -1490,7 +1490,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); - et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); + et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2); } } @@ -3192,14 +3192,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_INDEX_REG, register18 | 0x8402); + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_DATA_REG, register18 | 511); + 
et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3212,8 +3212,8 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_CONFIG, ®); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, - reg); + et131x_mii_write(adapter, phydev->mdio.addr, + PHY_CONFIG, reg); } et131x_set_rx_dma_timer(adapter); @@ -3226,14 +3226,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18 | 0x4); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_INDEX_REG, register18 | 0x8402); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_DATA_REG, register18 | 511); - et131x_mii_write(adapter, phydev->addr, + et131x_mii_write(adapter, phydev->mdio.addr, PHY_MPHY_CONTROL_REG, register18); } @@ -3265,7 +3265,7 @@ static int et131x_mii_probe(struct net_device *netdev) return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &et131x_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -3289,9 +3289,7 @@ static int et131x_mii_probe(struct net_device *netdev) phydev->autoneg = AUTONEG_ENABLE; adapter->phydev = phydev; - dev_info(&adapter->pdev->dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -3327,7 +3325,6 @@ static void et131x_pci_remove(struct pci_dev *pdev) netif_napi_del(&adapter->napi); phy_disconnect(adapter->phydev); mdiobus_unregister(adapter->mii_bus); - kfree(adapter->mii_bus->irq); mdiobus_free(adapter->mii_bus); et131x_adapter_memory_free(adapter); @@ -3946,7 +3943,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, struct net_device *netdev; struct et131x_adapter *adapter; int rc; - int ii; rc = pci_enable_device(pdev); if (rc < 0) { @@ -4036,18 +4032,11 @@ static int et131x_pci_setup(struct pci_dev *pdev, adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; - adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!adapter->mii_bus->irq) - goto err_mdio_free; - - for (ii = 0; ii < PHY_MAX_ADDR; ii++) - adapter->mii_bus->irq[ii] = PHY_POLL; rc = mdiobus_register(adapter->mii_bus); if (rc < 0) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_mdio_free_irq; + goto err_mdio_free; } rc = et131x_mii_probe(netdev); @@ -4087,8 +4076,6 @@ err_phy_disconnect: phy_disconnect(adapter->phydev); err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); -err_mdio_free_irq: - kfree(adapter->mii_bus->irq); err_mdio_free: mdiobus_free(adapter->mii_bus); err_mem_free: diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index fe644823ceaf..17472851674f 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -131,7 +131,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) { struct altera_tse_private *priv = netdev_priv(dev); int ret; - int i; struct device_node *mdio_node = NULL; struct mii_bus *mdio = NULL; struct device_node *child_node = NULL; @@ -161,14 +160,6 @@ static int 
altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->write = &altera_tse_mdio_write; snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); - mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (mdio->irq == NULL) { - ret = -ENOMEM; - goto out_free_mdio; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mdio->irq[i] = PHY_POLL; - mdio->priv = dev; mdio->parent = priv->device; @@ -176,7 +167,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) if (ret != 0) { netdev_err(dev, "Cannot register MDIO bus %s\n", mdio->id); - goto out_free_mdio_irq; + goto out_free_mdio; } if (netif_msg_drv(priv)) @@ -184,8 +175,6 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) priv->mdio = mdio; return 0; -out_free_mdio_irq: - kfree(mdio->irq); out_free_mdio: mdiobus_free(mdio); mdio = NULL; @@ -855,7 +844,7 @@ static int init_phy(struct net_device *dev) } netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", - phydev->addr, phydev->phy_id, phydev->link); + phydev->mdio.addr, phydev->phy_id, phydev->link); priv->phydev = phydev; return 0; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 5330bcb8a944..d3977d032b48 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -344,9 +344,6 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr, static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { - /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does - * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) - */ struct net_device *const dev = bus->priv; /* make sure the MAC associated with this @@ -502,7 +499,7 @@ static int au1000_mii_probe(struct net_device *dev) BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); if (aup->phy_addr) - phydev = aup->mii_bus->phy_map[aup->phy_addr]; + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); else netdev_info(dev, "using PHY-less setup\n"); return 0; @@ -512,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev) * on the current MAC's MII bus */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) - if (aup->mii_bus->phy_map[phy_addr]) { - phydev = aup->mii_bus->phy_map[phy_addr]; + if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) { + phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); if (!aup->phy_search_highest_addr) /* break out with first one found */ break; @@ -531,7 +528,8 @@ static int au1000_mii_probe(struct net_device *dev) */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { struct phy_device *const tmp_phydev = - aup->mii_bus->phy_map[phy_addr]; + mdiobus_get_phy(aup->mii_bus, + phy_addr); if (aup->mac_id == 1) break; @@ -558,7 +556,7 @@ static int au1000_mii_probe(struct net_device *dev) /* now we are supposed to have a proper phydev, to attach to... 
*/ BUG_ON(phydev->attached_dev); - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &au1000_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -583,9 +581,7 @@ static int au1000_mii_probe(struct net_device *dev) aup->old_duplex = -1; aup->phy_dev = phydev; - netdev_info(dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -1293,14 +1289,7 @@ static int au1000_probe(struct platform_device *pdev) aup->mii_bus->name = "au1000_eth_mii"; snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); - aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) { - err = -ENOMEM; - goto err_out; - } - for (i = 0; i < PHY_MAX_ADDR; ++i) - aup->mii_bus->irq[i] = PHY_POLL; /* if known, set corresponding PHY IRQs */ if (aup->phy_static_config) if (aup->phy_irq && aup->phy_busid == aup->mac_id) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 53ce1222b11d..8a9b493566c9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2024,7 +2024,6 @@ read_again: skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); - skb_mark_napi_id(skb, napi); napi_gro_receive(napi, skb); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c31e691d11fc..db55c9f6e8e1 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -869,7 +869,7 @@ void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) pdata->mdio_bus = NULL; } -struct xgene_mac_ops xgene_gmac_ops = { +const struct xgene_mac_ops xgene_gmac_ops = { .init = xgene_gmac_init, .reset = xgene_gmac_reset, .rx_enable = xgene_gmac_rx_enable, @@ -879,7 +879,7 @@ struct xgene_mac_ops xgene_gmac_ops = { .set_mac_addr = xgene_gmac_set_mac_addr, }; -struct xgene_port_ops xgene_gport_ops = { +const struct xgene_port_ops xgene_gport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_gport_shutdown, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index c153a1dc5ff7..8a9091039ab4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -340,8 +340,8 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); -extern struct xgene_mac_ops xgene_gmac_ops; -extern struct xgene_port_ops xgene_gport_ops; +extern const struct xgene_mac_ops xgene_gmac_ops; +extern const struct xgene_port_ops xgene_gport_ops; extern struct xgene_ring_ops xgene_ring1_ops; #endif /* __XGENE_ENET_HW_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d0ae1a6cc212..a4799c1fc7d4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -690,7 +690,7 @@ static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) static int xgene_enet_open(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_mac_ops *mac_ops = pdata->mac_ops; + const struct 
xgene_mac_ops *mac_ops = pdata->mac_ops; int ret; mac_ops->tx_enable(pdata); @@ -714,7 +714,7 @@ static int xgene_enet_open(struct net_device *ndev) static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_mac_ops *mac_ops = pdata->mac_ops; + const struct xgene_mac_ops *mac_ops = pdata->mac_ops; netif_stop_queue(ndev); @@ -1090,7 +1090,7 @@ static const struct net_device_ops xgene_ndev_ops = { }; #ifdef CONFIG_ACPI -static int xgene_get_port_id_acpi(struct device *dev, +static void xgene_get_port_id_acpi(struct device *dev, struct xgene_enet_pdata *pdata) { acpi_status status; @@ -1103,24 +1103,19 @@ static int xgene_get_port_id_acpi(struct device *dev, pdata->port_id = temp; } - return 0; + return; } #endif -static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) +static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) { u32 id = 0; - int ret; - ret = of_property_read_u32(dev->of_node, "port-id", &id); - if (ret) { - pdata->port_id = 0; - ret = 0; - } else { - pdata->port_id = id & BIT(0); - } + of_property_read_u32(dev->of_node, "port-id", &id); - return ret; + pdata->port_id = id & BIT(0); + + return; } static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata) @@ -1215,13 +1210,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } if (dev->of_node) - ret = xgene_get_port_id_dt(dev, pdata); + xgene_get_port_id_dt(dev, pdata); #ifdef CONFIG_ACPI else - ret = xgene_get_port_id_acpi(dev, pdata); + xgene_get_port_id_acpi(dev, pdata); #endif - if (ret) - return ret; if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) eth_hw_addr_random(ndev); @@ -1429,7 +1422,7 @@ static int xgene_enet_probe(struct platform_device *pdev) struct net_device *ndev; struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; - struct xgene_mac_ops *mac_ops; + const struct xgene_mac_ops *mac_ops; const struct of_device_id *of_id; int ret; @@ -1516,7 +1509,7 @@ err: static int xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; - struct xgene_mac_ops *mac_ops; + const struct xgene_mac_ops *mac_ops; struct net_device *ndev; pdata = platform_get_drvdata(pdev); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 1aa72c787f8d..70d5b62c125a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -174,8 +174,8 @@ struct xgene_enet_pdata { int phy_mode; enum xgene_enet_rm rm; struct rtnl_link_stats64 stats; - struct xgene_mac_ops *mac_ops; - struct xgene_port_ops *port_ops; + const struct xgene_mac_ops *mac_ops; + const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; struct delayed_work link_work; u32 port_id; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 05b817e56fde..78475512b683 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -405,7 +405,7 @@ static void xgene_enet_link_state(struct work_struct *work) schedule_delayed_work(&p->link_work, poll_interval); } -struct xgene_mac_ops xgene_sgmac_ops = { +const struct xgene_mac_ops xgene_sgmac_ops = { .init = xgene_sgmac_init, .reset = xgene_sgmac_reset, .rx_enable = xgene_sgmac_rx_enable, @@ -416,7 +416,7 @@ struct xgene_mac_ops xgene_sgmac_ops = { .link_state = xgene_enet_link_state }; -struct 
xgene_port_ops xgene_sgport_ops = { +const struct xgene_port_ops xgene_sgport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_enet_shutdown diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h index de432465009c..29a71b4dcc44 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -35,7 +35,7 @@ #define MPA_IDLE_WITH_QMI_EMPTY BIT(12) #define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc -extern struct xgene_mac_ops xgene_sgmac_ops; -extern struct xgene_port_ops xgene_sgport_ops; +extern const struct xgene_mac_ops xgene_sgmac_ops; +extern const struct xgene_port_ops xgene_sgport_ops; #endif /* __XGENE_ENET_SGMAC_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 7a28a48cb2c7..ba030dc1940b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -326,7 +326,7 @@ static void xgene_enet_link_state(struct work_struct *work) schedule_delayed_work(&pdata->link_work, poll_interval); } -struct xgene_mac_ops xgene_xgmac_ops = { +const struct xgene_mac_ops xgene_xgmac_ops = { .init = xgene_xgmac_init, .reset = xgene_xgmac_reset, .rx_enable = xgene_xgmac_rx_enable, @@ -338,7 +338,7 @@ struct xgene_mac_ops xgene_xgmac_ops = { .link_state = xgene_enet_link_state }; -struct xgene_port_ops xgene_xgport_ops = { +const struct xgene_port_ops xgene_xgport_ops = { .reset = xgene_enet_reset, .cle_bypass = xgene_enet_xgcle_bypass, .shutdown = xgene_enet_shutdown, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index f8f908dbf51c..0a2dca8a1725 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -69,7 +69,7 @@ #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 -extern struct xgene_mac_ops xgene_xgmac_ops; -extern struct xgene_port_ops xgene_xgport_ops; +extern const struct xgene_mac_ops xgene_xgmac_ops; +extern const struct xgene_port_ops xgene_xgport_ops; #endif /* __XGENE_ENET_XGMAC_H__ */ diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 52a6b16f57d2..689045186064 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -34,9 +34,9 @@ config EMAC_ROCKCHIP select ARC_EMAC_CORE depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA ---help--- - Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. This selects Rockchip SoC glue layer support for the - emac device driver. This driver is used for RK3066/RK3188 + emac device driver. This driver is used for RK3036/RK3066/RK3188 EMAC ethernet controller. 
endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c31c7407b753..85e821ccfcd2 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -25,17 +25,13 @@ #include "emac.h" #define DRV_NAME "rockchip_emac" -#define DRV_VERSION "1.0" - -#define GRF_MODE_MII (1UL << 0) -#define GRF_MODE_RMII (0UL << 0) -#define GRF_SPEED_10M (0UL << 1) -#define GRF_SPEED_100M (1UL << 1) -#define GRF_SPEED_ENABLE_BIT (1UL << 17) -#define GRF_MODE_ENABLE_BIT (1UL << 16) +#define DRV_VERSION "1.1" struct emac_rockchip_soc_data { - int grf_offset; + unsigned int grf_offset; + unsigned int grf_mode_offset; + unsigned int grf_speed_offset; + bool need_div_macclk; }; struct rockchip_priv_data { @@ -44,23 +40,22 @@ struct rockchip_priv_data { const struct emac_rockchip_soc_data *soc_data; struct regulator *regulator; struct clk *refclk; + struct clk *macclk; }; static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) { struct rockchip_priv_data *emac = priv; + u32 speed_offset = emac->soc_data->grf_speed_offset; u32 data; int err = 0; - /* write-enable bits */ - data = GRF_SPEED_ENABLE_BIT; - switch(speed) { case 10: - data |= GRF_SPEED_10M; + data = (1 << (speed_offset + 16)) | (0 << speed_offset); break; case 100: - data |= GRF_SPEED_100M; + data = (1 << (speed_offset + 16)) | (1 << speed_offset); break; default: pr_err("speed %u not supported\n", speed); @@ -72,14 +67,25 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) pr_err("unable to apply speed %u to grf (%d)\n", speed, err); } -static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { - { .grf_offset = 0x154 }, /* rk3066 */ - { .grf_offset = 0x0a4 }, /* rk3188 */ +static const struct emac_rockchip_soc_data emac_rk3036_emac_data = { + .grf_offset = 0x140, .grf_mode_offset = 8, + .grf_speed_offset = 9, .need_div_macclk = 1, +}; + +static const struct emac_rockchip_soc_data emac_rk3066_emac_data = { + .grf_offset = 0x154, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, +}; + +static const struct emac_rockchip_soc_data emac_rk3188_emac_data = { + .grf_offset = 0x0a4, .grf_mode_offset = 0, + .grf_speed_offset = 1, .need_div_macclk = 0, }; static const struct of_device_id emac_rockchip_dt_ids[] = { - { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, - { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data }, + { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data }, { /* Sentinel */ } }; @@ -110,7 +116,7 @@ static int emac_rockchip_probe(struct platform_device *pdev) interface = of_get_phy_mode(dev->of_node); - /* RK3066 and RK3188 SoCs only support RMII */ + /* RK3036/RK3066/RK3188 SoCs only support RMII */ if (interface != PHY_INTERFACE_MODE_RMII) { dev_err(dev, "unsupported phy interface mode %d\n", interface); err = -ENOTSUPP; @@ -164,15 +170,12 @@ static int emac_rockchip_probe(struct platform_device *pdev) } } - err = arc_emac_probe(ndev, interface); - if (err) - goto out_regulator_disable; - - /* write-enable bits */ - data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; - - data |= GRF_SPEED_100M; - data |= GRF_MODE_RMII; + /* Set speed 100M */ + data = (1 << (priv->soc_data->grf_speed_offset + 16)) | + (1 << priv->soc_data->grf_speed_offset); + /* Set RMII 
mode */ + data |= (1 << (priv->soc_data->grf_mode_offset + 16)) | + (0 << priv->soc_data->grf_mode_offset); err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); if (err) { @@ -184,6 +187,33 @@ static int emac_rockchip_probe(struct platform_device *pdev) err = clk_set_rate(priv->refclk, 50000000); if (err) dev_err(dev, "failed to change reference clock rate (%d)\n", err); + + if (priv->soc_data->need_div_macclk) { + priv->macclk = devm_clk_get(dev, "macclk"); + if (IS_ERR(priv->macclk)) { + dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk)); + err = PTR_ERR(priv->macclk); + goto out_regulator_disable; + } + + err = clk_prepare_enable(priv->macclk); + if (err) { + dev_err(dev, "failed to enable mac clock (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII TX/RX needs always a rate of 25MHz */ + err = clk_set_rate(priv->macclk, 25000000); + if (err) + dev_err(dev, "failed to change mac clock rate (%d)\n", err); + } + + err = arc_emac_probe(ndev, interface); + if (err) { + dev_err(dev, "failed to probe arc emac (%d)\n", err); + goto out_regulator_disable; + } + return 0; out_regulator_disable: diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 7712f068f6d4..1fe35e453d43 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -958,13 +958,13 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); - raw_mtu = hw->mtu + ETH_HLEN; - alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); - if (raw_mtu > ALX_MTU_JUMBO_TH) + raw_mtu = ALX_RAW_MTU(hw->mtu); + alx_write_mem32(hw, ALX_MTU, raw_mtu); + if (raw_mtu > (ALX_MTU_JUMBO_TH + ETH_FCS_LEN + VLAN_HLEN)) hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; - if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) - val = (raw_mtu + 8 + 7) >> 3; + if (raw_mtu < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 7) >> 3; else val = ALX_TXQ1_JUMBO_TSO_TH >> 3; alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 15548802d6f8..f289c05f5cb4 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/mdio.h> #include <linux/pci.h> +#include <linux/if_vlan.h> #include "reg.h" /* Transmit Packet Descriptor, contains 4 32-bit words. 
@@ -343,12 +344,14 @@ struct alx_rrd { ALX_RSS_HASH_TYPE_IPV4_TCP | \ ALX_RSS_HASH_TYPE_IPV6 | \ ALX_RSS_HASH_TYPE_IPV6_TCP) -#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_FRAME_PAD 16 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MAX_FRAME_LEN(_mtu) (ALIGN((ALX_RAW_MTU(_mtu) + ALX_FRAME_PAD), 8)) +#define ALX_DEF_RXBUF_SIZE ALX_MAX_FRAME_LEN(1500) #define ALX_MAX_JUMBO_PKT_SIZE (9*1024) #define ALX_MAX_TSO_PKT_SIZE (7*1024) #define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE -#define ALX_MIN_FRAME_SIZE 68 -#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) +#define ALX_MIN_FRAME_SIZE (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) #define ALX_MAX_RX_QUEUES 8 #define ALX_MAX_TX_QUEUES 4 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index bd377a6b067d..55b118e876fd 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -577,7 +577,6 @@ static int alx_alloc_rings(struct alx_priv *alx) alx->int_mask &= ~ALX_ISR_ALL_QUEUES; alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; - alx->tx_ringsz = alx->tx_ringsz; netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); @@ -705,7 +704,7 @@ static int alx_init_sw(struct alx_priv *alx) hw->smb_timer = 400; hw->mtu = alx->dev->mtu; - alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; @@ -806,7 +805,7 @@ static void alx_reinit(struct alx_priv *alx) static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); - int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int max_frame = ALX_MAX_FRAME_LEN(mtu); if ((max_frame < ALX_MIN_FRAME_SIZE) || (max_frame > ALX_MAX_FRAME_SIZE)) @@ -817,8 +816,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) netdev->mtu = mtu; alx->hw.mtu = mtu; - alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? 
- ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); if (netif_running(netdev)) alx_reinit(alx); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a3b1c07ae0af..74f0a37c4eb6 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2263,24 +2263,16 @@ static int b44_register_phy_one(struct b44 *bp) mii_bus->parent = sdev->dev; mii_bus->phy_mask = ~(1 << bp->phy_addr); snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance); - mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!mii_bus->irq) { - dev_err(sdev->dev, "mii_bus irq allocation failed\n"); - err = -ENOMEM; - goto err_out_mdiobus; - } - - memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); bp->mii_bus = mii_bus; err = mdiobus_register(mii_bus); if (err) { dev_err(sdev->dev, "failed to register MII bus\n"); - goto err_out_mdiobus_irq; + goto err_out_mdiobus; } - if (!bp->mii_bus->phy_map[bp->phy_addr] && + if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { dev_info(sdev->dev, @@ -2313,19 +2305,15 @@ static int b44_register_phy_one(struct b44 *bp) bp->phydev = phydev; bp->old_link = 0; - bp->phy_addr = phydev->addr; + bp->phy_addr = phydev->mdio.addr; - dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; err_out_mdiobus_unregister: mdiobus_unregister(mii_bus); -err_out_mdiobus_irq: - kfree(mii_bus->irq); - err_out_mdiobus: mdiobus_free(mii_bus); @@ -2339,7 +2327,6 @@ static void b44_unregister_phy_one(struct b44 *bp) phy_disconnect(bp->phydev); mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 8b1929e9f698..87c6b5bdd616 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -908,8 +908,7 @@ static int bcm_enet_open(struct net_device *dev) else phydev->advertising &= ~SUPPORTED_Pause; - dev_info(kdev, "attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); + phy_attached_info(phydev); priv->old_link = 0; priv->old_duplex = -1; @@ -1849,17 +1848,8 @@ static int bcm_enet_probe(struct platform_device *pdev) * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); - bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, - GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_free_mdio; - } - if (priv->has_phy_interrupt) bus->irq[priv->phy_id] = priv->phy_interrupt; - else - bus->irq[priv->phy_id] = PHY_POLL; ret = mdiobus_register(bus); if (ret) { @@ -2884,33 +2874,21 @@ struct platform_driver bcm63xx_enet_shared_driver = { }, }; +static struct platform_driver * const drivers[] = { + &bcm63xx_enet_shared_driver, + &bcm63xx_enet_driver, + &bcm63xx_enetsw_driver, +}; + /* entry point */ static int __init bcm_enet_init(void) { - int ret; - - ret = platform_driver_register(&bcm63xx_enet_shared_driver); - if (ret) - return ret; - - ret = platform_driver_register(&bcm63xx_enet_driver); - if (ret) - platform_driver_unregister(&bcm63xx_enet_shared_driver); - - ret = platform_driver_register(&bcm63xx_enetsw_driver); - if (ret) { - platform_driver_unregister(&bcm63xx_enet_driver); - platform_driver_unregister(&bcm63xx_enet_shared_driver); - } 
- - return ret; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit bcm_enet_exit(void) { - platform_driver_unregister(&bcm63xx_enet_driver); - platform_driver_unregister(&bcm63xx_enetsw_driver); - platform_driver_unregister(&bcm63xx_enet_shared_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 858106352ce9..993c780bdfab 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1216,7 +1216,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* Initialize SW view of the ring */ spin_lock_init(&ring->lock); ring->priv = priv; - netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); + netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; ring->alloc_size = ring->size; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 28f7610b03fe..c7798d360512 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1471,7 +1471,7 @@ static int bgmac_mii_register(struct bgmac *bgmac) struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; - int i, err = 0; + int err = 0; if (ci->id == BCMA_CHIP_ID_BCM4707 || ci->id == BCMA_CHIP_ID_BCM53018) @@ -1490,18 +1490,10 @@ static int bgmac_mii_register(struct bgmac *bgmac) mii_bus->parent = &bgmac->core->dev; mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!mii_bus->irq) { - err = -ENOMEM; - goto err_free_bus; - } - for (i = 0; i < PHY_MAX_ADDR; i++) - mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(mii_bus); if (err) { bgmac_err(bgmac, "Registration of mii bus failed\n"); - goto err_free_irq; + goto err_free_bus; } bgmac->mii_bus = mii_bus; @@ -1522,8 +1514,6 @@ static int bgmac_mii_register(struct bgmac *bgmac) err_unregister_bus: mdiobus_unregister(mii_bus); -err_free_irq: - kfree(mii_bus->irq); err_free_bus: mdiobus_free(mii_bus); return err; @@ -1534,7 +1524,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) struct mii_bus *mii_bus = bgmac->mii_bus; mdiobus_unregister(mii_bus); - kfree(mii_bus->irq); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index b5e64b02200c..cae0956186ce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -540,10 +540,6 @@ struct bnx2x_fastpath { struct napi_struct napi; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned long busy_poll_state; -#endif - union host_hc_status_block status_blk; /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; @@ -594,8 +590,6 @@ struct bnx2x_fastpath { /* The last maximal completed SGE */ u16 last_max_sge; __le16 *rx_cons_sb; - unsigned long rx_pkt, - rx_calls; /* TPA related */ struct bnx2x_agg_info *tpa_info; @@ -617,115 +611,6 @@ struct bnx2x_fastpath { #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) -#ifdef CONFIG_NET_RX_BUSY_POLL - -enum bnx2x_fp_state { - BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ - - BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ - BNX2X_STATE_FP_NAPI_REQ = BIT(1), - - BNX2X_STATE_FP_POLL_BIT 
= 2, - BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ - - BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ -}; - -static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) -{ - WRITE_ONCE(fp->busy_poll_state, 0); -} - -/* called from the device poll routine to get ownership of a FP */ -static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) -{ - unsigned long prev, old = READ_ONCE(fp->busy_poll_state); - - while (1) { - switch (old) { - case BNX2X_STATE_FP_POLL: - /* make sure bnx2x_fp_lock_poll() wont starve us */ - set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, - &fp->busy_poll_state); - /* fallthrough */ - case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); - if (unlikely(prev != old)) { - old = prev; - continue; - } - return true; - } -} - -static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) -{ - smp_wmb(); - fp->busy_poll_state = 0; -} - -/* called from bnx2x_low_latency_poll() */ -static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) -{ - return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; -} - -static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) -{ - smp_mb__before_atomic(); - clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); -} - -/* true if a socket is polling */ -static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) -{ - return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; -} - -/* false if fp is currently owned */ -static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) -{ - set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); - return !bnx2x_fp_ll_polling(fp); - -} -#else -static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) -{ - return true; -} - -static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) -{ - return false; -} - -static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) -{ -} - -static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) -{ - return false; -} -static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c82ab87fcbe8..9695a4c4a434 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -46,7 +46,6 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) for_each_rx_queue_cnic(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); } } @@ -58,7 +57,6 @@ static void bnx2x_add_all_napi(struct bnx2x *bp) for_each_eth_queue(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); - napi_hash_add(&bnx2x_fp(bp, i, napi)); } } @@ -560,10 +558,8 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, put_page(pool->page); pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); - if (unlikely(!pool->page)) { - BNX2X_ERR("Can't alloc sge\n"); + if (unlikely(!pool->page)) return -ENOMEM; - } pool->offset = 0; } @@ -747,7 +743,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct 
bnx2x_fastpath *fp, bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); break; default: - BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", be16_to_cpu(skb->protocol)); } } @@ -1094,12 +1090,7 @@ reuse_rx: __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - skb_mark_napi_id(skb, &fp->napi); - - if (bnx2x_fp_ll_polling(fp)) - netif_receive_skb(skb); - else - napi_gro_receive(&fp->napi, skb); + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -1131,9 +1122,6 @@ next_cqe: bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, fp->rx_sge_prod); - fp->rx_pkt += rx_pkt; - fp->rx_calls++; - return rx_pkt; } @@ -1869,7 +1857,6 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) int i; for_each_rx_queue_cnic(bp, i) { - bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -1879,7 +1866,6 @@ static void bnx2x_napi_enable(struct bnx2x *bp) int i; for_each_eth_queue(bp, i) { - bnx2x_fp_busy_poll_init(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); } } @@ -1890,8 +1876,6 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_ll_disable(&bp->fp[i])) - usleep_range(1000, 2000); } } @@ -1901,8 +1885,6 @@ static void bnx2x_napi_disable(struct bnx2x *bp) for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_ll_disable(&bp->fp[i])) - usleep_range(1000, 2000); } } @@ -3219,49 +3201,32 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) */ static int bnx2x_poll(struct napi_struct *napi, int budget) { - int work_done = 0; - u8 cos; struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, napi); struct bnx2x *bp = fp->bp; + int rx_work_done; + u8 cos; - while (1) { #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - napi_complete(napi); - return 0; - } + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } #endif - if (!bnx2x_fp_lock_napi(fp)) - return budget; - - for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) - bnx2x_tx_int(bp, fp->txdata_ptr[cos]); - - if (bnx2x_has_rx_work(fp)) { - work_done += bnx2x_rx_int(fp, budget - work_done); - - /* must not complete if we consumed full budget */ - if (work_done >= budget) { - bnx2x_fp_unlock_napi(fp); - break; - } - } - - bnx2x_fp_unlock_napi(fp); + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + bnx2x_tx_int(bp, fp->txdata_ptr[cos]); - /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; - /* No need to update SB for FCoE L2 ring as long as - * it's connected to the default SB and the SB - * has been updated when NAPI was scheduled. - */ - if (IS_FCOE_FP(fp)) { - napi_complete(napi); - break; - } + if (rx_work_done < budget) { + /* No need to update SB for FCoE L2 ring as long as + * it's connected to the default SB and the SB + * has been updated when NAPI was scheduled. 
+ */ + if (IS_FCOE_FP(fp)) { + napi_complete(napi); + } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, * thus we need to ensure that status block indices @@ -3286,40 +3251,15 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1); - break; + } else { + rx_work_done = budget; } } } - return work_done; + return rx_work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -int bnx2x_low_latency_recv(struct napi_struct *napi) -{ - struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, - napi); - struct bnx2x *bp = fp->bp; - int found = 0; - - if ((bp->state == BNX2X_STATE_CLOSED) || - (bp->state == BNX2X_STATE_ERROR) || - (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) - return LL_FLUSH_FAILED; - - if (!bnx2x_fp_lock_poll(fp)) - return LL_FLUSH_BUSY; - - if (bnx2x_has_rx_work(fp)) - found = bnx2x_rx_int(fp, 4); - - bnx2x_fp_unlock_poll(fp); - - return found; -} -#endif - /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs @@ -4494,7 +4434,6 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, /* Limit the CQE producer by the CQE ring size */ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index b7d32e8412f1..4cbb03f87b5a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -570,13 +570,6 @@ int bnx2x_enable_msix(struct bnx2x *bp); int bnx2x_enable_msi(struct bnx2x *bp); /** - * bnx2x_low_latency_recv - LL callback - * - * @napi: napi structure - */ -int bnx2x_low_latency_recv(struct napi_struct *napi); - -/** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * * @bp: driver handle diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index d84efcd34fac..820b7e04bb5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -52,7 +52,7 @@ static const struct { { Q_STATS_OFFSET32(rx_skb_alloc_failed), 4, "[%s]: rx_skb_alloc_discard" }, { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, - + { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, /* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "[%s]: tx_ucast_packets" }, @@ -74,117 +74,115 @@ static const struct { static const struct { long offset; int size; - u32 flags; -#define STATS_FLAGS_PORT 1 -#define STATS_FLAGS_FUNC 2 -#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) + bool is_port_stat; char string[ETH_GSTRING_LEN]; } bnx2x_stats_arr[] = { /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bytes" }, + 8, false, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, + 8, false, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, + 8, false, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, 
"rx_mcast_packets" }, + 8, false, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, + 8, false, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), - 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + 8, true, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), - 8, STATS_FLAGS_PORT, "rx_align_errors" }, + 8, true, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + 8, true, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, + 8, true, "rx_oversize_packets" }, /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), - 8, STATS_FLAGS_PORT, "rx_fragments" }, + 8, true, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), - 8, STATS_FLAGS_PORT, "rx_jabbers" }, + 8, true, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), - 8, STATS_FLAGS_BOTH, "rx_discards" }, + 8, false, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), - 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, + 4, true, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), - 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, + 4, true, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_received" }, + 8, true, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), - 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, + 8, true, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), - 8, STATS_FLAGS_PORT, "rx_brb_discard" }, + 8, true, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), - 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, + 8, true, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), - 8, STATS_FLAGS_PORT, "rx_pause_frames" }, + 8, true, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, + 8, true, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), - 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, + 4, true, "rx_constant_pause_events" }, /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), - 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, + 4, false, "rx_phy_ip_err_discards"}, { STATS_OFFSET32(rx_skb_alloc_failed), - 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, + 4, false, "rx_skb_alloc_discard" }, { STATS_OFFSET32(hw_csum_err), - 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, - + 4, false, "rx_csum_offload_errors" }, + { STATS_OFFSET32(driver_xoff), + 4, false, "tx_exhaustion_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bytes" }, + 8, false, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), - 8, STATS_FLAGS_PORT, "tx_error_bytes" }, + 8, true, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, + 8, false, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, + 8, false, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, + 8, false, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), - 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + 8, true, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), - 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, + 8, true, "tx_carrier_errors" }, /* 30 */{ 
STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + 8, true, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + 8, true, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), - 8, STATS_FLAGS_PORT, "tx_deferred" }, + 8, true, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + 8, true, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + 8, true, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), - 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + 8, true, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), - 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, + 8, true, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), - 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + 8, true, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), - 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + 8, true, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), - 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, + 8, true, "tx_256_to_511_byte_packets" }, /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), - 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + 8, true, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + 8, true, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + 8, true, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), - 8, STATS_FLAGS_PORT, "tx_pause_frames" }, + 8, true, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), - 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, + 8, false, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), - 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, + 8, false, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), - 8, STATS_FLAGS_FUNC, "tpa_bytes"}, + 8, false, "tpa_bytes"}, { STATS_OFFSET32(recoverable_error), - 4, STATS_FLAGS_FUNC, "recoverable_errors" }, + 4, false, "recoverable_errors" }, { STATS_OFFSET32(unrecoverable_error), - 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, + 4, false, "unrecoverable_errors" }, { STATS_OFFSET32(driver_filtered_tx_pkt), - 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" }, + 4, false, "driver_filtered_tx_pkt" }, { STATS_OFFSET32(eee_tx_lpi), - 4, STATS_FLAGS_PORT, "Tx LPI entry count"} + 4, true, "Tx LPI entry count"} }; #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) @@ -3065,12 +3063,8 @@ static void bnx2x_self_test(struct net_device *dev, } } -#define IS_PORT_STAT(i) \ - ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) -#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define HIDE_PORT_STAT(bp) \ - ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ - IS_VF(bp)) +#define IS_PORT_STAT(i) (bnx2x_stats_arr[i].is_port_stat) +#define HIDE_PORT_STAT(bp) IS_VF(bp) /* ethtool statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled @@ -3094,7 +3088,7 @@ static int 
bnx2x_get_sset_count(struct net_device *dev, int stringset) num_strings = 0; if (HIDE_PORT_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) - if (IS_FUNC_STAT(i)) + if (!IS_PORT_STAT(i)) num_strings++; } else num_strings += BNX2X_NUM_STATS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index cafd5de675cf..27aa0802d87d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3013,8 +3013,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 12 -#define BCM_5710_FW_REVISION_VERSION 30 +#define BCM_5710_FW_MINOR_VERSION 13 +#define BCM_5710_FW_REVISION_VERSION 1 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3583,7 +3583,7 @@ enum classify_rule { CLASSIFY_RULE_OPCODE_MAC, CLASSIFY_RULE_OPCODE_VLAN, CLASSIFY_RULE_OPCODE_PAIR, - CLASSIFY_RULE_OPCODE_VXLAN, + CLASSIFY_RULE_OPCODE_IMAC_VNI, MAX_CLASSIFY_RULE }; @@ -3826,6 +3826,17 @@ struct eth_classify_header { __le32 echo; }; +/* + * Command for adding/removing a Inner-MAC/VNI classification rule + */ +struct eth_classify_imac_vni_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 imac_lsb; + __le16 imac_mid; + __le16 imac_msb; + __le16 reserved1; +}; /* * Command for adding/removing a MAC classification rule @@ -3869,14 +3880,6 @@ struct eth_classify_vlan_cmd { /* * Command for adding/removing a VXLAN classification rule */ -struct eth_classify_vxlan_cmd { - struct eth_classify_cmd_header header; - __le32 vni; - __le16 inner_mac_lsb; - __le16 inner_mac_mid; - __le16 inner_mac_msb; - __le16 reserved1; -}; /* * union for eth classification rule @@ -3885,7 +3888,7 @@ union eth_classify_rule_cmd { struct eth_classify_mac_cmd mac; struct eth_classify_vlan_cmd vlan; struct eth_classify_pair_cmd pair; - struct eth_classify_vxlan_cmd vxlan; + struct eth_classify_imac_vni_cmd imac_vni; }; /* @@ -5623,6 +5626,14 @@ enum igu_mode { MAX_IGU_MODE }; +/* + * Inner Headers Classification Type + */ +enum inner_clss_type { + INNER_CLSS_DISABLED, + INNER_CLSS_USE_VLAN, + INNER_CLSS_USE_VNI, + MAX_INNER_CLSS_TYPE}; /* * IP versions @@ -5953,14 +5964,6 @@ enum ts_offset_cmd { MAX_TS_OFFSET_CMD }; -/* Tunnel Mode */ -enum tunnel_mode { - TUNN_MODE_NONE, - TUNN_MODE_VXLAN, - TUNN_MODE_GRE, - MAX_TUNNEL_MODE -}; - /* zone A per-queue data */ struct ustorm_queue_zone_data { struct ustorm_eth_rx_producers eth_rx_producers; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2e611dc5f162..6c4e3a69976f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13004,9 +13004,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = bnx2x_low_latency_recv, -#endif .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 07f5f239cb65..df835f5e46d8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -72,8 +72,10 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_TX_PUSH_THRESH 92 enum board_idx { + BCM57301, BCM57302, BCM57304, + BCM57402, BCM57404, BCM57406, BCM57304_VF, @@ -84,17 
+86,21 @@ enum board_idx { static const struct { char *name; } board_info[] = { - { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, + { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, - { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, #ifdef CONFIG_BNXT_SRIOV @@ -173,7 +179,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) u32 len, free_size, vlan_tag_flags, cfa_action, flags; u16 prod, last_frag; struct pci_dev *pdev = bp->pdev; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; @@ -183,8 +188,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(dev, i); prod = txr->tx_prod; @@ -417,8 +421,8 @@ tx_dma_error: static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) { - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - int index = bnapi->index; + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + int index = txr - &bp->tx_ring[0]; struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); u16 cons = txr->tx_cons; struct pci_dev *pdev = bp->pdev; @@ -596,7 +600,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, { struct bnxt *bp = bnapi->bp; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; u32 i; @@ -675,7 +679,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, { struct pci_dev *pdev = bp->pdev; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u32 i; @@ -856,8 +860,13 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, struct tcphdr *th; int payload_off, tcp_opt_len = 0; int len, nw_off; + u16 segs; + + segs = TPA_END_TPA_SEGS(tpa_end); + if (segs == 1) + return skb; - NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end); + NAPI_GRO_CB(skb)->count = segs; skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; @@ -929,7 +938,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, bool *agg_event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u8 agg_id = 
TPA_END_AGG_ID(tpa_end); u8 *data, agg_bufs; u16 cp_cons = RING_CMP(*raw_cons); @@ -1045,7 +1054,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, bool *agg_event) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; struct net_device *dev = bp->dev; struct rx_cmp *rxcmp; struct rx_cmp_ext *rxcmp1; @@ -1187,8 +1196,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb->csum_level = RX_CMP_ENCAP(rxcmp1); } } else { - if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) - cpr->rx_l4_csum_errors++; + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { + if (dev->features & NETIF_F_RXCSUM) + cpr->rx_l4_csum_errors++; + } } skb_record_rx_queue(skb, bnapi->index); @@ -1377,7 +1388,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnxt_tx_int(bp, bnapi, tx_pkts); if (rx_event) { - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); @@ -1446,19 +1457,14 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) int i, max_idx; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->tx_ring) return; max_idx = bp->tx_nr_pages * TX_DESC_CNT; for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; for (j = 0; j < max_idx;) { struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb = tx_buf->skb; @@ -1504,21 +1510,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) int i, max_idx, max_agg_idx; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->rx_ring) return; max_idx = bp->rx_nr_pages * RX_DESC_CNT; max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; int j; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; - if (rxr->rx_tpa) { for (j = 0; j < MAX_TPA; j++) { struct bnxt_tpa_info *tpa_info = @@ -1646,19 +1646,13 @@ static void bnxt_free_rx_rings(struct bnxt *bp) { int i; - if (!bp->bnapi) + if (!bp->rx_ring) return; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; - kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -1677,6 +1671,9 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) { int i, rc, agg_rings = 0, tpa_rings = 0; + if (!bp->rx_ring) + return -ENOMEM; + if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; @@ -1684,14 +1681,9 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) tpa_rings = 1; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - rxr = &bnapi->rx_ring; ring = &rxr->rx_ring_struct; rc = bnxt_alloc_ring(bp, ring); @@ -1729,19 +1721,13 @@ static void bnxt_free_tx_rings(struct bnxt *bp) int i; struct pci_dev *pdev = bp->pdev; - if (!bp->bnapi) + if (!bp->tx_ring) return; for (i = 0; i < 
bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; - if (txr->tx_push) { dma_free_coherent(&pdev->dev, bp->tx_push_size, txr->tx_push, txr->tx_push_mapping); @@ -1775,14 +1761,9 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) } for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; - if (!bnapi) - continue; - - txr = &bnapi->tx_ring; ring = &txr->tx_ring_struct; rc = bnxt_alloc_ring(bp, ring); @@ -1885,7 +1866,10 @@ static void bnxt_init_ring_struct(struct bnxt *bp) ring->dma_arr = cpr->cp_desc_mapping; ring->vmem_size = 0; - rxr = &bnapi->rx_ring; + rxr = bnapi->rx_ring; + if (!rxr) + goto skip_rx; + ring = &rxr->rx_ring_struct; ring->nr_pages = bp->rx_nr_pages; ring->page_size = HW_RXBD_RING_SIZE; @@ -1902,7 +1886,11 @@ static void bnxt_init_ring_struct(struct bnxt *bp) ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; ring->vmem = (void **)&rxr->rx_agg_ring; - txr = &bnapi->tx_ring; +skip_rx: + txr = bnapi->tx_ring; + if (!txr) + continue; + ring = &txr->tx_ring_struct; ring->nr_pages = bp->tx_nr_pages; ring->page_size = HW_RXBD_RING_SIZE; @@ -1938,22 +1926,18 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) { struct net_device *dev = bp->dev; - struct bnxt_napi *bnapi = bp->bnapi[ring_nr]; struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; u32 prod, type; int i; - if (!bnapi) - return -EINVAL; - type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; if (NET_IP_ALIGN == 2) type |= RX_BD_FLAGS_SOP; - rxr = &bnapi->rx_ring; + rxr = &bp->rx_ring[ring_nr]; ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); @@ -1969,11 +1953,12 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) rxr->rx_prod = prod; ring->fw_ring_id = INVALID_HW_RING_ID; + ring = &rxr->rx_agg_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) return 0; - ring = &rxr->rx_agg_ring_struct; - type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; @@ -1989,7 +1974,6 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; - ring->fw_ring_id = INVALID_HW_RING_ID; if (bp->flags & BNXT_FLAG_TPA) { if (rxr->rx_tpa) { @@ -2035,8 +2019,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp) MAX_SKB_FRAGS + 1); for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; @@ -2423,14 +2406,18 @@ static void bnxt_clear_ring_indices(struct bnxt *bp) cpr = &bnapi->cp_ring; cpr->cp_raw_cons = 0; - txr = &bnapi->tx_ring; - txr->tx_prod = 0; - txr->tx_cons = 0; + txr = bnapi->tx_ring; + if (txr) { + txr->tx_prod = 0; + txr->tx_cons = 0; + } - rxr = &bnapi->rx_ring; - rxr->rx_prod = 0; - rxr->rx_agg_prod = 0; - rxr->rx_sw_agg_prod = 0; + rxr = bnapi->rx_ring; + if (rxr) { + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + } } } @@ -2496,6 +2483,10 @@ static void 
bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); + kfree(bp->tx_ring); + bp->tx_ring = NULL; + kfree(bp->rx_ring); + bp->rx_ring = NULL; kfree(bp->bnapi); bp->bnapi = NULL; } else { @@ -2505,7 +2496,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) { - int i, rc, size, arr_size; + int i, j, rc, size, arr_size; void *bnapi; if (irq_re_init) { @@ -2527,6 +2518,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) bp->bnapi[i]->bp = bp; } + bp->rx_ring = kcalloc(bp->rx_nr_rings, + sizeof(struct bnxt_rx_ring_info), + GFP_KERNEL); + if (!bp->rx_ring) + return -ENOMEM; + + for (i = 0; i < bp->rx_nr_rings; i++) { + bp->rx_ring[i].bnapi = bp->bnapi[i]; + bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; + } + + bp->tx_ring = kcalloc(bp->tx_nr_rings, + sizeof(struct bnxt_tx_ring_info), + GFP_KERNEL); + if (!bp->tx_ring) + return -ENOMEM; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + j = 0; + else + j = bp->rx_nr_rings; + + for (i = 0; i < bp->tx_nr_rings; i++, j++) { + bp->tx_ring[i].bnapi = bp->bnapi[j]; + bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + } + rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; @@ -2596,6 +2614,9 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* Write request msg to hwrm channel */ __iowrite32_copy(bp->bar0, data, msg_len / 4); + for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) + writel(0, bp->bar0 + i); + /* currently supports only one outstanding message */ if (intr_process) bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & @@ -2710,6 +2731,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) +{ + struct hwrm_func_drv_unrgtr_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { u32 rc = 0; @@ -2772,7 +2801,7 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); @@ -2805,7 +2834,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ - CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID) + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) @@ -2824,7 +2853,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.ethertype = htons(ETH_P_IP); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ipaddr_type = 4; + req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; req.ip_protocol = keys->basic.ip_proto; req.src_ipaddr[0] = keys->addrs.v4addrs.src; @@ -2837,7 +2866,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.dst_port = keys->ports.dst; req.dst_port_mask = cpu_to_be16(0xffff); - req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id); + 
req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) @@ -2857,10 +2886,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | - CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); memcpy(req.l2_addr, mac_addr, ETH_ALEN); req.l2_addr_mask[0] = 0xff; @@ -2930,7 +2959,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) req.enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | - VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS); + VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | + VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); /* Number of segs are log2 units, and first packet is not * included as part of this units. @@ -2948,6 +2978,8 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) segs = ilog2(nsegs); req.max_agg_segs = cpu_to_le16(segs); req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + + req.min_agg_len = cpu_to_le32(512); } req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -3058,7 +3090,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - int grp_idx = 0; + unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input req = {0}; @@ -3069,10 +3101,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); req.cos_rule = cpu_to_le16(0xffff); if (vnic->flags & BNXT_VNIC_RSS_FLAG) - grp_idx = 0; + ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) - grp_idx = vnic_id - 1; + ring = vnic_id - 1; + grp_idx = bp->rx_ring[ring].bnapi->index; req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); @@ -3113,22 +3146,25 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp) bnxt_hwrm_vnic_free_one(bp, i); } -static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, - u16 end_grp_id) +static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) { - u32 rc = 0, i, j; + int rc = 0; + unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; struct hwrm_vnic_alloc_input req = {0}; struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; /* map ring groups to this vnic */ - for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) { - if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { + for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { + grp_idx = bp->rx_ring[i].bnapi->index; + if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", - j, (end_grp_id - start_grp_id)); + j, nr_rings); break; } bp->vnic_info[vnic_id].fw_grp_ids[j] = - bp->grp_info[i].fw_grp_id; + bp->grp_info[grp_idx].fw_grp_id; } bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; @@ -3155,20 +3191,22 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) struct hwrm_ring_grp_alloc_input req = 
{0}; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; + unsigned int grp_idx = bp->rx_ring[i].bnapi->index; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); - req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); + req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) break; - bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); + bp->grp_info[grp_idx].fw_grp_id = + le32_to_cpu(resp->ring_group_id); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -3293,75 +3331,66 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; - if (bp->cp_nr_rings) { - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_CMPL, i, - INVALID_STATS_CTX_ID); - if (rc) - goto err_out; - cpr->cp_doorbell = bp->bar1 + i * 0x80; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + cpr->cp_doorbell = bp->bar1 + i * 0x80; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; } - if (bp->tx_nr_rings) { - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 map_idx = txr->bnapi->index; + u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_TX, i, - fw_stats_ctx); - if (rc) - goto err_out; - txr->tx_doorbell = bp->bar1 + i * 0x80; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, + map_idx, fw_stats_ctx); + if (rc) + goto err_out; + txr->tx_doorbell = bp->bar1 + map_idx * 0x80; } - if (bp->rx_nr_rings) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 map_idx = rxr->bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_RX, i, - INVALID_STATS_CTX_ID); - if (rc) - goto err_out; - rxr->rx_doorbell = bp->bar1 + i * 0x80; - writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); - bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; - } + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, + map_idx, INVALID_STATS_CTX_ID); + if (rc) + 
goto err_out; + rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; } if (bp->flags & BNXT_FLAG_AGG_RINGS) { for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 map_idx = grp_idx + bp->rx_nr_rings; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_AGG, - bp->rx_nr_rings + i, + map_idx, INVALID_STATS_CTX_ID); if (rc) goto err_out; - rxr->rx_agg_doorbell = - bp->bar1 + (bp->rx_nr_rings + i) * 0x80; + rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); - bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } err_out: @@ -3408,91 +3437,75 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, return 0; } -static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) +static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { - int i, rc = 0; + int i; if (!bp->bnapi) - return 0; + return; - if (bp->tx_nr_rings) { - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_TX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - } + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 grp_idx = txr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_TX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; } } - if (bp->rx_nr_rings) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].rx_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? 
cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].rx_fw_ring_id = + INVALID_HW_RING_ID; } } - if (bp->rx_agg_nr_pages) { - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; - struct bnxt_ring_struct *ring = - &rxr->rx_agg_ring_struct; - u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].agg_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].agg_fw_ring_id = + INVALID_HW_RING_ID; } } - if (bp->cp_nr_rings) { - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg( - bp, ring, - RING_FREE_REQ_RING_TYPE_CMPL, - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[i].cp_fw_ring_id = - INVALID_HW_RING_ID; - } + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; } } - - return rc; } int bnxt_hwrm_set_coal(struct bnxt *bp) @@ -3604,7 +3617,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) return 0; } -static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {0}; @@ -3628,9 +3641,10 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); - pf->max_pf_tx_rings = pf->max_tx_rings; pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); - pf->max_pf_rx_rings = pf->max_rx_rings; + pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!pf->max_hw_ring_grps) + pf->max_hw_ring_grps = pf->max_tx_rings; pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); pf->max_vnics = le16_to_cpu(resp->max_vnics); pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); @@ -3658,6 +3672,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!vf->max_hw_ring_grps) + vf->max_hw_ring_grps = vf->max_tx_rings; vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); vf->max_vnics = le16_to_cpu(resp->max_vnics); vf->max_stat_ctxs = 
le16_to_cpu(resp->max_stat_ctx); @@ -3734,14 +3751,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); - if (req.hwrm_intf_maj != resp->hwrm_intf_maj || - req.hwrm_intf_min != resp->hwrm_intf_min || - req.hwrm_intf_upd != resp->hwrm_intf_upd) { - netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", + if (resp->hwrm_intf_maj < 1) { + netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", resp->hwrm_intf_maj, resp->hwrm_intf_min, - resp->hwrm_intf_upd, req.hwrm_intf_maj, - req.hwrm_intf_min, req.hwrm_intf_upd); - netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); + resp->hwrm_intf_upd); + netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, @@ -3867,7 +3881,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) break; bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1); + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", vnic_id, rc); @@ -3944,8 +3958,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) } bp->vnic_info[0].uc_filter_count = 1; - bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | - CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) bp->vnic_info[0].rx_mask |= @@ -4026,20 +4039,42 @@ static int bnxt_set_real_num_queues(struct bnxt *bp) return rc; #ifdef CONFIG_RFS_ACCEL - if (bp->rx_nr_rings) + if (bp->flags & BNXT_FLAG_RFS) dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); - if (!dev->rx_cpu_rmap) - rc = -ENOMEM; #endif return rc; } +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared) +{ + int _rx = *rx, _tx = *tx; + + if (shared) { + *rx = min_t(int, _rx, max); + *tx = min_t(int, _tx, max); + } else { + if (max < 2) + return -ENOMEM; + + while (_rx + _tx > max) { + if (_rx > _tx && _rx > 1) + _rx--; + else if (_tx > 1) + _tx--; + } + *rx = _rx; + *tx = _tx; + } + return 0; +} + static int bnxt_setup_msix(struct bnxt *bp) { struct msix_entry *msix_ent; struct net_device *dev = bp->dev; - int i, total_vecs, rc = 0; + int i, total_vecs, rc = 0, min = 1; const int len = sizeof(bp->irq_tbl[0].name); bp->flags &= ~BNXT_FLAG_USING_MSIX; @@ -4054,7 +4089,10 @@ static int bnxt_setup_msix(struct bnxt *bp) msix_ent[i].vector = 0; } - total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + min = 2; + + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); if (total_vecs < 0) { rc = -ENODEV; goto msix_setup_exit; @@ -4065,8 +4103,11 @@ static int bnxt_setup_msix(struct bnxt *bp) int tcs; /* Trim rings based upon num of vectors allocated */ - bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); - bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); + rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, + total_vecs, min == 1); + if (rc) + goto msix_setup_exit; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; tcs = netdev_get_num_tc(dev); if (tcs > 1) { @@ -4085,12 +4126,21 @@ static int bnxt_setup_msix(struct bnxt *bp) } } } - bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, 
bp->tx_nr_rings); + bp->cp_nr_rings = total_vecs; for (i = 0; i < bp->cp_nr_rings; i++) { + char *attr; + bp->irq_tbl[i].vector = msix_ent[i].vector; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + attr = "TxRx"; + else if (i < bp->rx_nr_rings) + attr = "rx"; + else + attr = "tx"; + snprintf(bp->irq_tbl[i].name, len, - "%s-%s-%d", dev->name, "TxRx", i); + "%s-%s-%d", dev->name, attr, i); bp->irq_tbl[i].handler = bnxt_msix; } rc = bnxt_set_real_num_queues(bp); @@ -4128,6 +4178,7 @@ static int bnxt_setup_inta(struct bnxt *bp) bp->tx_nr_rings = 1; bp->cp_nr_rings = 1; bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->irq_tbl[0].vector = bp->pdev->irq; snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 0); @@ -4176,7 +4227,7 @@ static void bnxt_free_irq(struct bnxt *bp) static int bnxt_request_irq(struct bnxt *bp) { - int i, rc = 0; + int i, j, rc = 0; unsigned long flags = 0; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; @@ -4185,14 +4236,15 @@ static int bnxt_request_irq(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_USING_MSIX)) flags = IRQF_SHARED; - for (i = 0; i < bp->cp_nr_rings; i++) { + for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { struct bnxt_irq *irq = &bp->irq_tbl[i]; #ifdef CONFIG_RFS_ACCEL - if (rmap && (i < bp->rx_nr_rings)) { + if (rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); if (rc) netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", - i); + j); + j++; } #endif rc = request_irq(irq->vector, irq->handler, flags, irq->name, @@ -4230,12 +4282,10 @@ static void bnxt_init_napi(struct bnxt *bp) bnapi = bp->bnapi[i]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); - napi_hash_add(&bnapi->napi); } } else { bnapi = bp->bnapi[0]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); - napi_hash_add(&bnapi->napi); } } @@ -4265,14 +4315,12 @@ static void bnxt_enable_napi(struct bnxt *bp) static void bnxt_tx_disable(struct bnxt *bp) { int i; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct netdev_queue *txq; - if (bp->bnapi) { + if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); __netif_tx_lock(txq, smp_processor_id()); txr->dev_state = BNXT_DEV_STATE_CLOSING; @@ -4287,13 +4335,11 @@ static void bnxt_tx_disable(struct bnxt *bp) static void bnxt_tx_enable(struct bnxt *bp) { int i; - struct bnxt_napi *bnapi; struct bnxt_tx_ring_info *txr; struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { - bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; + txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } @@ -4355,7 +4401,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex_setting; + link_info->duplex_setting = resp->duplex; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else @@ -4932,9 +4978,32 @@ skip_uc: return rc; } +static bool bnxt_rfs_capable(struct bnxt *bp) +{ +#ifdef CONFIG_RFS_ACCEL + struct bnxt_pf_info *pf = &bp->pf; + int vnics; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) + return false; + + vnics = 1 + bp->rx_nr_rings; + if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) + return false; + + return true; +#else + 
return false; +#endif +} + static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { + struct bnxt *bp = netdev_priv(dev); + + if (!bnxt_rfs_capable(bp)) + features &= ~NETIF_F_NTUPLE; return features; } @@ -4975,7 +5044,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bp->flags = flags; - if (!netif_running(dev)) { + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -4999,31 +5068,53 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; + int i = bnapi->index; + + if (!txr) + return; + + netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", + i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, + txr->tx_cons); +} + +static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + int i = bnapi->index; + + if (!rxr) + return; + + netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", + i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, + rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, + rxr->rx_sw_agg_prod); +} + +static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i = bnapi->index; + + netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); +} + static void bnxt_dbg_dump_states(struct bnxt *bp) { int i; struct bnxt_napi *bnapi; - struct bnxt_tx_ring_info *txr; - struct bnxt_rx_ring_info *rxr; - struct bnxt_cp_ring_info *cpr; for (i = 0; i < bp->cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - txr = &bnapi->tx_ring; - rxr = &bnapi->rx_ring; - cpr = &bnapi->cp_ring; if (netif_msg_drv(bp)) { - netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", - i, txr->tx_ring_struct.fw_ring_id, - txr->tx_prod, txr->tx_cons); - netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", - i, rxr->rx_ring_struct.fw_ring_id, - rxr->rx_prod, - rxr->rx_agg_ring_struct.fw_ring_id, - rxr->rx_agg_prod, rxr->rx_sw_agg_prod); - netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", - i, cpr->cp_ring_struct.fw_ring_id, - cpr->cp_raw_cons); + bnxt_dump_tx_sw_state(bnapi); + bnxt_dump_rx_sw_state(bnapi); + bnxt_dump_cp_sw_state(bnapi); } } } @@ -5294,10 +5385,14 @@ static int bnxt_setup_tc(struct net_device *dev, u8 tc) return 0; if (tc) { - int max_rx_rings, max_tx_rings; + int max_rx_rings, max_tx_rings, rc; + bool sh = false; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); - if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) + rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) return -ENOMEM; } @@ -5551,6 +5646,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) cancel_work_sync(&bp->sp_task); bp->sp_event = 0; + bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); pci_iounmap(pdev, bp->bar2); pci_iounmap(pdev, bp->bar1); @@ -5610,28 +5706,64 @@ static int bnxt_get_max_irq(struct pci_dev *pdev) return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; } -void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx) +static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, + 
int *max_cp) { - int max_rings = 0; + int max_ring_grps = 0; - if (BNXT_PF(bp)) { - *max_tx = bp->pf.max_pf_tx_rings; - *max_rx = bp->pf.max_pf_rx_rings; - max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); - max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs); - } else { #ifdef CONFIG_BNXT_SRIOV + if (!BNXT_PF(bp)) { *max_tx = bp->vf.max_tx_rings; *max_rx = bp->vf.max_rx_rings; - max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); - max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs); + *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); + *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); + max_ring_grps = bp->vf.max_hw_ring_grps; + } else #endif + { + *max_tx = bp->pf.max_tx_rings; + *max_rx = bp->pf.max_rx_rings; + *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); + *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); + max_ring_grps = bp->pf.max_hw_ring_grps; } + if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; + *max_rx = min_t(int, *max_rx, max_ring_grps); +} + +int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) +{ + int rx, tx, cp; + + _bnxt_get_max_rings(bp, &rx, &tx, &cp); + if (!rx || !tx || !cp) + return -ENOMEM; - *max_rx = min_t(int, *max_rx, max_rings); - *max_tx = min_t(int, *max_tx, max_rings); + *max_rx = rx; + *max_tx = tx; + return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); +} + +static int bnxt_set_dflt_rings(struct bnxt *bp) +{ + int dflt_rings, max_rx_rings, max_tx_rings, rc; + bool sh = true; + + if (sh) + bp->flags |= BNXT_FLAG_SHARED_RINGS; + dflt_rings = netif_get_num_default_rss_queues(); + rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + if (rc) + return rc; + bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); + bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : + bp->tx_nr_rings + bp->rx_nr_rings; + bp->num_stat_ctxs = bp->cp_nr_rings; + return rc; } static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -5639,7 +5771,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; struct net_device *dev; struct bnxt *bp; - int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings; + int rc, max_irqs; if (version_printed++ == 0) pr_info("%s", version); @@ -5654,11 +5786,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (bnxt_vf_pciid(ent->driver_data)) bp->flags |= BNXT_FLAG_VF; - if (pdev->msix_cap) { + if (pdev->msix_cap) bp->flags |= BNXT_FLAG_MSIX_CAP; - if (BNXT_PF(bp)) - bp->flags |= BNXT_FLAG_RFS; - } rc = bnxt_init_board(pdev, dev); if (rc < 0) @@ -5677,9 +5806,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; - if (bp->flags & BNXT_FLAG_RFS) - dev->hw_features |= NETIF_F_NTUPLE; - dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -5724,19 +5850,21 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); - dflt_rings = netif_get_num_default_rss_queues(); if (BNXT_PF(bp)) bp->pf.max_irqs = max_irqs; #if defined(CONFIG_BNXT_SRIOV) else bp->vf.max_irqs = max_irqs; #endif - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); - bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); - bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; - bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); - bp->num_stat_ctxs = bp->cp_nr_rings; + bnxt_set_dflt_rings(bp); + + if (BNXT_PF(bp)) { + dev->hw_features |= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } + } if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f199f4cc8ffe..8af3ca8efcef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -11,11 +11,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "0.1.24" +#define DRV_MODULE_VERSION "1.0.0" -#define DRV_VER_MAJ 0 -#define DRV_VER_MIN 1 -#define DRV_VER_UPD 24 +#define DRV_VER_MAJ 1 +#define DRV_VER_MIN 0 +#define DRV_VER_UPD 0 struct tx_bd { __le32 tx_bd_len_flags_type; @@ -528,6 +528,7 @@ struct tx_push_bd { }; struct bnxt_tx_ring_info { + struct bnxt_napi *bnapi; u16 tx_prod; u16 tx_cons; void __iomem *tx_doorbell; @@ -558,6 +559,7 @@ struct bnxt_tpa_info { }; struct bnxt_rx_ring_info { + struct bnxt_napi *bnapi; u16 rx_prod; u16 rx_agg_prod; u16 rx_sw_agg_prod; @@ -604,8 +606,8 @@ struct bnxt_napi { int index; struct bnxt_cp_ring_info cp_ring; - struct bnxt_rx_ring_info rx_ring; - struct bnxt_tx_ring_info tx_ring; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring; #ifdef CONFIG_NET_RX_BUSY_POLL atomic_t poll_state; @@ -695,6 +697,7 @@ struct bnxt_vf_info { u16 max_cp_rings; u16 max_tx_rings; u16 max_rx_rings; + u16 max_hw_ring_grps; u16 max_l2_ctxs; u16 max_irqs; u16 max_vnics; @@ -722,9 +725,8 @@ struct bnxt_pf_info { u16 max_rsscos_ctxs; u16 max_cp_rings; u16 max_tx_rings; /* HW assigned max 
tx rings for this PF */ - u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */ u16 max_rx_rings; /* HW assigned max rx rings for this PF */ - u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */ + u16 max_hw_ring_grps; u16 max_irqs; u16 max_l2_ctxs; u16 max_vnics; @@ -875,6 +877,8 @@ struct bnxt { #define BNXT_FLAG_USING_MSIX 0x40 #define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_RFS 0x100 + #define BNXT_FLAG_SHARED_RINGS 0x200 + #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ BNXT_FLAG_STRIP_VLAN) @@ -884,6 +888,9 @@ struct bnxt { struct bnxt_napi **bnapi; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring; + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u32 rx_ring_size; @@ -913,6 +920,8 @@ struct bnxt { int cp_nr_rings; int num_stat_ctxs; + + /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; int nr_vnics; @@ -1084,9 +1093,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); +int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); -void bnxt_get_max_rings(struct bnxt *, int *, int *); +int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 45bd628eaf3a..922b898e7a32 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -211,7 +211,10 @@ static void bnxt_get_channels(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); + channel->max_combined = max_rx_rings; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; @@ -219,9 +222,12 @@ static void bnxt_get_channels(struct net_device *dev, channel->max_rx = max_rx_rings; channel->max_tx = max_tx_rings; channel->max_other = 0; - channel->max_combined = 0; - channel->rx_count = bp->rx_nr_rings; - channel->tx_count = bp->tx_nr_rings_per_tc; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) { + channel->combined_count = bp->rx_nr_rings; + } else { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } } static int bnxt_set_channels(struct net_device *dev, @@ -230,19 +236,35 @@ static int bnxt_set_channels(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; u32 rc = 0; + bool sh = false; + + if (channel->other_count) + return -EINVAL; + + if (!channel->combined_count && + (!channel->rx_count || !channel->tx_count)) + return -EINVAL; - if (channel->other_count || channel->combined_count || - !channel->rx_count || !channel->tx_count) + if (channel->combined_count && + (channel->rx_count || channel->tx_count)) return -EINVAL; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + if (channel->combined_count) + sh = true; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); + tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; - if (channel->rx_count > 
max_rx_rings || - channel->tx_count > max_tx_rings) - return -EINVAL; + if (sh && (channel->combined_count > max_rx_rings || + channel->combined_count > max_tx_rings)) + return -ENOMEM; + + if (!sh && (channel->rx_count > max_rx_rings || + channel->tx_count > max_tx_rings)) + return -ENOMEM; if (netif_running(dev)) { if (BNXT_PF(bp)) { @@ -258,14 +280,27 @@ static int bnxt_set_channels(struct net_device *dev, } } - bp->rx_nr_rings = channel->rx_count; - bp->tx_nr_rings_per_tc = channel->tx_count; + if (sh) { + bp->flags |= BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->combined_count; + bp->tx_nr_rings_per_tc = channel->combined_count; + } else { + bp->flags &= ~BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->rx_count; + bp->tx_nr_rings_per_tc = channel->tx_count; + } + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; if (tcs > 1) bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; - bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + + bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : + bp->tx_nr_rings + bp->rx_nr_rings; + bp->num_stat_ctxs = bp->cp_nr_rings; + /* After changing number of rx channels, update NTUPLE feature. */ + netdev_update_features(dev); if (netif_running(dev)) { rc = bnxt_open_nic(bp, true, false); if ((!rc) && BNXT_PF(bp)) { @@ -802,6 +837,45 @@ static int bnxt_flash_nvram(struct net_device *dev, return rc; } +static int bnxt_firmware_reset(struct net_device *dev, + u16 dir_type) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + + /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ + /* (e.g. when firmware isn't already running) */ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + /* Self-reset ChiMP upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + break; + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + req.embedded_proc_type = + FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + break; + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; + break; + default: + return -EINVAL; + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_flash_firmware(struct net_device *dev, u16 dir_type, const u8 *fw_data, @@ -818,6 +892,9 @@ static int bnxt_flash_firmware(struct net_device *dev, case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_APE_FW: + code_type = CODE_MCTP_PASSTHRU; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@ -856,10 +933,9 @@ static int bnxt_flash_firmware(struct net_device *dev, /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 0, 0, fw_data, fw_size); - if (rc == 0) { /* Firmware update successful */ - /* TODO: Notify processor it needs to reset itself - */ - } + if (rc == 0) /* Firmware update successful */ + rc = bnxt_firmware_reset(dev, dir_type); + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 70fc8253c07f..4badbedcb421 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -103,19 +103,22 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) __le32 event_data2; u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; }; @@ -132,9 +135,16 @@ struct hwrm_async_event_cmpl_link_status_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; - #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 }; /* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */ @@ -150,7 +160,8 @@ struct hwrm_async_event_cmpl_link_mtu_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 @@ -169,7 +180,8 @@ struct hwrm_async_event_cmpl_link_speed_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL @@ -200,7 +212,8 @@ struct hwrm_async_event_cmpl_dcb_config_change { #define 
HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 @@ -219,7 +232,8 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed { #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 @@ -238,7 +252,8 @@ struct hwrm_async_event_cmpl_func_drvr_unload { #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 @@ -257,7 +272,8 @@ struct hwrm_async_event_cmpl_func_drvr_load { #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 @@ -276,10 +292,13 @@ struct hwrm_async_event_cmpl_pf_drvr_unload { #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 }; /* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */ @@ -289,16 +308,19 @@ struct hwrm_async_event_cmpl_pf_drvr_load { #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) __le16 event_id; - #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0) __le32 event_data2; u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 }; /* HWRM 
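
A hypothetical consumer-side sketch of the async-event record changes above: PF_DRVR_LOAD was corrected from 0x20 to 0x21 so it no longer aliases PF_DRVR_UNLOAD, and the old unused_1[3] padding became timestamp_lo/timestamp_hi, assumed here to combine into one 24-bit timestamp. It relies on the bnxt_hsi.h definitions being diffed; example_handle_async_event() is not part of the driver.

static void example_handle_async_event(struct net_device *dev,
				       struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	u32 ts = cmpl->timestamp_lo |
		 ((u32)le16_to_cpu(cmpl->timestamp_hi) << 8);

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:	/* 0x20 */
		netdev_dbg(dev, "PF driver unload, ts %u\n", ts);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:	/* 0x21 */
		netdev_dbg(dev, "PF driver load, ts %u\n", ts);
		break;
	default:
		break;
	}
}
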
Asynchronous Event Completion Record for VF FLR (16 bytes) */ @@ -314,7 +336,8 @@ struct hwrm_async_event_cmpl_vf_flr { #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 @@ -333,7 +356,8 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change { #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 @@ -357,18 +381,20 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 - u8 unused_1[3]; + u8 timestamp_lo; + __le16 timestamp_hi; __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 0.7.8 */ -#define HWRM_VERSION_MAJOR 0 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 8 +/* HW Resource Manager Specification 1.0.0 */ +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 0 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "0.7.8" -/* Following is the signature for HWRM message field that indicates not +#define HWRM_VERSION_STR "1.0.0" +/* + * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
*/ #define HWRM_NA_SIGNATURE ((__le32)(-1)) @@ -398,7 +424,9 @@ struct output { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET (0x0UL) - #define HWRM_FUNC_DISABLE (0x10UL) + #define HWRM_FUNC_BUF_UNRGTR (0xeUL) + #define HWRM_FUNC_VF_CFG (0xfUL) + #define RESERVED1 (0x10UL) #define HWRM_FUNC_RESET (0x11UL) #define HWRM_FUNC_GETFID (0x12UL) #define HWRM_FUNC_VF_ALLOC (0x13UL) @@ -414,10 +442,9 @@ struct cmd_nums { #define HWRM_FUNC_DRV_RGTR (0x1dUL) #define HWRM_FUNC_DRV_QVER (0x1eUL) #define HWRM_FUNC_BUF_RGTR (0x1fUL) - #define HWRM_FUNC_VF_CFG (0x20UL) #define HWRM_PORT_PHY_CFG (0x20UL) #define HWRM_PORT_MAC_CFG (0x21UL) - #define HWRM_PORT_ENABLE (0x22UL) + #define RESERVED2 (0x22UL) #define HWRM_PORT_QSTATS (0x23UL) #define HWRM_PORT_LPBK_QSTATS (0x24UL) #define HWRM_PORT_CLR_STATS (0x25UL) @@ -455,13 +482,11 @@ struct cmd_nums { #define HWRM_RING_GRP_FREE (0x61UL) #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL) #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL) - #define HWRM_ARB_GRP_ALLOC (0x80UL) - #define HWRM_ARB_GRP_CFG (0x81UL) #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL) #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL) + #define RESERVED3 (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -469,6 +494,9 @@ struct cmd_nums { #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL) #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL) #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL) + #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL) + #define HWRM_CFA_EM_FLOW_FREE (0x9dUL) + #define HWRM_CFA_EM_FLOW_CFG (0x9eUL) #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL) #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL) #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL) @@ -483,8 +511,6 @@ struct cmd_nums { #define HWRM_FWD_RESP (0xd2UL) #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) - #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL) - #define HWRM_MGMT_L2_FILTER_FREE (0x101UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) @@ -505,7 +531,6 @@ struct cmd_nums { __le16 unused_0[3]; }; -/* Return Codes (8 bytes) */ struct ret_codes { __le16 error_code; #define HWRM_ERR_CODE_SUCCESS (0x0UL) @@ -529,7 +554,7 @@ struct hwrm_err_output { __le16 resp_len; __le32 opaque_0; __le16 opaque_1; - u8 opaque_2; + u8 cmd_err; u8 valid; }; @@ -686,65 +711,38 @@ struct hwrm_ver_get_output { u8 hwrm_fw_min; u8 hwrm_fw_bld; u8 hwrm_fw_rsvd; - u8 ape_fw_maj; - u8 ape_fw_min; - u8 ape_fw_bld; - u8 ape_fw_rsvd; - u8 kong_fw_maj; - u8 kong_fw_min; - u8 kong_fw_bld; - u8 kong_fw_rsvd; - u8 tang_fw_maj; - u8 tang_fw_min; - u8 tang_fw_bld; - u8 tang_fw_rsvd; - u8 bono_fw_maj; - u8 bono_fw_min; - u8 bono_fw_bld; - u8 bono_fw_rsvd; + u8 mgmt_fw_maj; + u8 mgmt_fw_min; + u8 mgmt_fw_bld; + u8 mgmt_fw_rsvd; + u8 netctrl_fw_maj; + u8 netctrl_fw_min; + u8 netctrl_fw_bld; + u8 netctrl_fw_rsvd; + __le32 reserved1; + u8 roce_fw_maj; + u8 roce_fw_min; + u8 roce_fw_bld; + u8 roce_fw_rsvd; char hwrm_fw_name[16]; - char ape_fw_name[16]; - char kong_fw_name[16]; - char tang_fw_name[16]; - char bono_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + __le32 reserved2[4]; + char roce_fw_name[16]; __le16 chip_num; u8 chip_rev; u8 chip_metal; u8 chip_bond_id; - u8 unused_0; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 
(0x0UL << 0) + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0) + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0) __le16 max_req_win_len; __le16 max_resp_len; __le16 def_req_timeout; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_func_disable */ -/* Input (24 bytes) */ -struct hwrm_func_disable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL - __le16 vf_id; - __le16 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_func_disable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; u8 valid; }; @@ -759,7 +757,12 @@ struct hwrm_func_reset_input { __le32 enables; #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL __le16 vf_id; - __le16 unused_0; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0) + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0) + u8 unused_0; }; /* Output (16 bytes) */ @@ -861,7 +864,7 @@ struct hwrm_func_vf_free_output { }; /* hwrm_func_vf_cfg */ -/* Input (24 bytes) */ +/* Input (32 bytes) */ struct hwrm_func_vf_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -871,8 +874,11 @@ struct hwrm_func_vf_cfg_input { __le32 enables; #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL __le16 mtu; __le16 guest_vlan; + __le16 async_event_cr; + __le16 unused_0[3]; }; /* Output (16 bytes) */ @@ -944,7 +950,7 @@ struct hwrm_func_cfg_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le16 vf_id; + __le16 fid; u8 unused_0; u8 unused_1; __le32 flags; @@ -1000,10 +1006,6 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0) #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) u8 allowed_vlan_pris; - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0) - #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) u8 evb_mode; #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0) #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0) @@ -1166,6 +1168,15 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0) u8 ver_maj; u8 ver_min; u8 ver_upd; @@ -1276,9 +1287,7 @@ struct hwrm_func_drv_qver_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 enables; - #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL - #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL + __le32 reserved; __le16 fid; 
__le16 unused_0; }; @@ -1290,6 +1299,15 @@ struct hwrm_func_drv_qver_output { __le16 seq_id; __le16 resp_len; __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0) u8 ver_maj; u8 ver_min; u8 ver_upd; @@ -1498,9 +1516,7 @@ struct hwrm_port_phy_qcfg_output { u8 force_pause; #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL - u8 duplex_setting; - #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0) - #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0) + u8 reserved1; __le32 preemphasis; u8 phy_maj; u8 phy_min; @@ -1601,33 +1617,6 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; -/* hwrm_port_enable */ -/* Input (24 bytes) */ -struct hwrm_port_enable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL - __le16 port_id; - __le16 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_port_enable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1651,10 +1640,11 @@ struct hwrm_port_qstats_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - __le32 unused_0; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; u8 valid; }; @@ -1668,7 +1658,7 @@ struct hwrm_port_lpbk_qstats_input { __le64 resp_addr; }; -/* Output (64 bytes) */ +/* Output (96 bytes) */ struct hwrm_port_lpbk_qstats_output { __le16 error_code; __le16 req_type; @@ -1680,6 +1670,10 @@ struct hwrm_port_lpbk_qstats_output { __le64 lpbk_ucast_bytes; __le64 lpbk_mcast_bytes; __le64 lpbk_bcast_bytes; + __le64 tx_stat_discard; + __le64 tx_stat_error; + __le64 rx_stat_discard; + __le64 rx_stat_error; __le32 unused_0; u8 unused_1; u8 unused_2; @@ -1884,12 +1878,11 @@ struct hwrm_queue_buffers_cfg_input { __le32 enables; #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL - #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL __le32 queue_id; __le32 reserved; __le32 shared; @@ -1921,15 +1914,15 @@ struct hwrm_queue_pfcenable_cfg_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 enables; - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL - #define 
QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL + __le32 flags; + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL __le16 port_id; __le16 unused_0; }; @@ -1962,14 +1955,14 @@ struct hwrm_queue_pri2cos_cfg_input { #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL __le32 enables; u8 port_id; - u8 pri0_cos; - u8 pri1_cos; - u8 pri2_cos; - u8 pri3_cos; - u8 pri4_cos; - u8 pri5_cos; - u8 pri6_cos; - u8 pri7_cos; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; u8 unused_0[7]; }; @@ -2164,6 +2157,7 @@ struct hwrm_vnic_cfg_input { __le32 flags; #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -2380,18 +2374,16 @@ struct hwrm_ring_alloc_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL - #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL - #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL + #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL + #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL + #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL + #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0) #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 unused_1; __le64 page_tbl_addr; @@ -2406,17 +2398,17 @@ struct hwrm_ring_alloc_input { __le16 queue_id; u8 unused_4; u8 unused_5; - __le32 arb_grp_id; - __le16 input_number; + __le32 reserved1; + __le16 reserved2; u8 unused_6; u8 unused_7; - __le32 weight; + __le32 reserved3; __le32 stat_ctx_id; - __le32 min_bw; + __le32 reserved4; __le32 max_bw; u8 int_mode; #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0) - #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0) + #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0) #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0) #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0) u8 unused_8[3]; @@ -2448,8 +2440,6 @@ struct hwrm_ring_free_input { #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0) #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define 
RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -2550,8 +2540,6 @@ struct hwrm_ring_reset_input { #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0) #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0) #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0) - #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0) - #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0) u8 unused_0; __le16 ring_id; __le32 unused_1; @@ -2622,61 +2610,6 @@ struct hwrm_ring_grp_free_output { u8 valid; }; -/* hwrm_arb_grp_alloc */ -/* Input (24 bytes) */ -struct hwrm_arb_grp_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 input_number; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_arb_grp_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 arb_grp_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_arb_grp_cfg */ -/* Input (32 bytes) */ -struct hwrm_arb_grp_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 arb_grp_id; - __le16 input_number; - __le16 tx_ring; - __le32 weight; - __le32 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_arb_grp_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_cfa_l2_filter_alloc */ /* Input (96 bytes) */ struct hwrm_cfa_l2_filter_alloc_input { @@ -2708,7 +2641,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL u8 l2_addr[6]; u8 unused_0; @@ -2751,7 +2684,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_7; - __le16 dst_vnic_id; + __le16 dst_id; __le16 mirror_vnic_id; u8 pri_hint; #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) @@ -2816,10 +2749,11 @@ struct hwrm_cfa_l2_filter_cfg_input { #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL __le32 enables; - #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL __le64 l2_filter_id; - __le32 dst_vnic_id; - __le32 unused_0; + __le32 dst_id; + __le32 new_mirror_vnic_id; }; /* Output (16 bytes) */ @@ -2843,9 +2777,9 @@ struct hwrm_cfa_l2_set_rx_mask_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 dflt_vnic_id; + __le32 vnic_id; __le32 mask; - #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL @@ -2869,46 +2803,6 @@ struct hwrm_cfa_l2_set_rx_mask_output { u8 valid; }; -/* hwrm_cfa_l2_set_bcastmcast_mirroring */ -/* Input (32 bytes) */ -struct hwrm_cfa_l2_set_bcastmcast_mirroring_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; 
- __le16 target_id; - __le64 resp_addr; - __le32 dflt_vnic_id; - __le32 mirroring_flags; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL - __le16 vlan_id; - u8 bcast_domain; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) - u8 mcast_domain; - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0) - #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) - __le32 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_cfa_l2_set_bcastmcast_mirroring_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - /* hwrm_cfa_tunnel_filter_alloc */ /* Input (88 bytes) */ struct hwrm_cfa_tunnel_filter_alloc_input { @@ -3017,17 +2911,16 @@ struct hwrm_cfa_encap_record_alloc_input { __le32 encap_data[16]; }; -/* Output (24 bytes) */ +/* Output (16 bytes) */ struct hwrm_cfa_encap_record_alloc_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; - __le64 encap_record_id; - __le32 unused_0; + __le32 encap_record_id; + u8 unused_0; u8 unused_1; u8 unused_2; - u8 unused_3; u8 valid; }; @@ -3039,7 +2932,8 @@ struct hwrm_cfa_encap_record_free_input { __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le64 encap_record_id; + __le32 encap_record_id; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -3083,14 +2977,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL __le64 l2_filter_id; u8 src_macaddr[6]; __be16 ethertype; - u8 ipaddr_type; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0) u8 ip_protocol; - __le16 dst_vnic_id; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0) + __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) @@ -3104,6 +3005,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) + #define 
CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0) __be32 src_ipaddr[4]; __be32 src_ipaddr_mask[4]; __be32 dst_ipaddr[4]; @@ -3162,11 +3068,11 @@ struct hwrm_cfa_ntuple_filter_cfg_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL __le32 unused_0; __le64 ntuple_filter_id; - __le32 new_dst_vnic_id; + __le32 new_dst_id; __le32 new_mirror_vnic_id; }; @@ -3192,16 +3098,8 @@ struct hwrm_tunnel_dst_port_query_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0[7]; }; @@ -3228,16 +3126,8 @@ struct hwrm_tunnel_dst_port_alloc_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0; __be16 tunnel_dst_port_val; __le32 unused_1; @@ -3267,16 +3157,8 @@ struct hwrm_tunnel_dst_port_free_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) u8 unused_0; __le16 tunnel_dst_port_id; __le32 unused_1; @@ -3416,68 +3298,145 @@ struct hwrm_stat_ctx_clr_stats_output { u8 valid; }; -/* hwrm_mgmt_l2_filter_alloc */ -/* Input (56 bytes) */ -struct hwrm_mgmt_l2_filter_alloc_input { +/* hwrm_fw_reset */ +/* Input (24 bytes) */ 
+struct hwrm_fw_reset_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le32 flags; - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) - __le32 enables; - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL - #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL - u8 l2_address[6]; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0) + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_exec_fwd_resp */ +/* Input (128 bytes) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; u8 unused_1; - u8 l2_address_mask[6]; - __le16 ovlan; - __le16 ovlan_mask; - __le16 ivlan; - __le16 ivlan_mask; u8 unused_2; u8 unused_3; - __le32 action_id; - u8 action_bypass; - #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL - u8 unused_5[3]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp */ +/* Input (128 bytes) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; }; /* Output (16 bytes) */ -struct hwrm_mgmt_l2_filter_alloc_output { +struct hwrm_reject_fwd_resp_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; - __le16 mgmt_l2_filter_id; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_resp */ +/* Input (40 bytes) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; u8 unused_0; u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; u8 unused_2; u8 unused_3; - u8 unused_4; u8 valid; }; -/* hwrm_mgmt_l2_filter_free */ -/* Input (24 bytes) */ -struct hwrm_mgmt_l2_filter_free_input { +/* hwrm_fwd_async_event_cmpl */ +/* Input (32 bytes) */ 
+struct hwrm_fwd_async_event_cmpl_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; - __le16 mgmt_l2_filter_id; - __le16 unused_0[3]; + __le16 encap_async_event_target_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le32 encap_async_event_cmpl[4]; }; /* Output (16 bytes) */ -struct hwrm_mgmt_l2_filter_free_output { +struct hwrm_fwd_async_event_cmpl_output { __le16 error_code; __le16 req_type; __le16 seq_id; @@ -3489,6 +3448,31 @@ struct hwrm_mgmt_l2_filter_free_output { u8 valid; }; +/* hwrm_temp_monitor_query */ +/* Input (16 bytes) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + /* hwrm_nvm_raw_write_blk */ /* Input (32 bytes) */ struct hwrm_nvm_raw_write_blk_input { @@ -3621,7 +3605,7 @@ struct hwrm_nvm_get_dir_info_output { }; /* hwrm_nvm_write */ -/* Input (40 bytes) */ +/* Input (48 bytes) */ struct hwrm_nvm_write_input { __le16 req_type; __le16 cmpl_ring; @@ -3637,6 +3621,8 @@ struct hwrm_nvm_write_input { __le16 option; __le16 flags; #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + __le32 dir_item_length; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -3645,10 +3631,9 @@ struct hwrm_nvm_write_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; u8 valid; }; @@ -3833,214 +3818,4 @@ struct hwrm_nvm_verify_update_output { u8 valid; }; -/* hwrm_exec_fwd_resp */ -/* Input (120 bytes) */ -struct hwrm_exec_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[24]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_exec_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_reject_fwd_resp */ -/* Input (120 bytes) */ -struct hwrm_reject_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[24]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_reject_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_resp */ -/* Input (40 bytes) */ -struct hwrm_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 encap_resp_target_id; - __le16 encap_resp_cmpl_ring; - __le16 encap_resp_len; - u8 unused_0; - u8 unused_1; - __le64 encap_resp_addr; - __le32 encap_resp[24]; -}; - -/* Output (16 bytes) */ -struct hwrm_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_async_event_cmpl */ -/* Input (32 bytes) */ -struct hwrm_fwd_async_event_cmpl_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - 
__le16 encap_async_event_target_id; - u8 unused_0; - u8 unused_1; - u8 unused_2[3]; - u8 unused_3; - __le32 encap_async_event_cmpl[4]; -}; - -/* Output (16 bytes) */ -struct hwrm_fwd_async_event_cmpl_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fw_reset */ -/* Input (24 bytes) */ -struct hwrm_fw_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) - u8 selfrst_status; - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_fw_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_fw_qstatus */ -/* Input (24 bytes) */ -struct hwrm_fw_qstatus_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ -struct hwrm_fw_qstatus_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_temp_monitor_query */ -/* Input (16 bytes) */ -struct hwrm_temp_monitor_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; -}; - -/* Output (16 bytes) */ -struct hwrm_temp_monitor_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 temp; - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ea044bbcd384..c1cc83d7e38c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -64,7 +64,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) * the spoof check should also include vlan anti-spoofing */ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = 
cpu_to_le32(func_flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { @@ -128,7 +128,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) memcpy(vf->mac_addr, mac, ETH_ALEN); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); memcpy(req.dflt_mac_addr, mac, ETH_ALEN); @@ -159,7 +159,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.dflt_vlan = cpu_to_le16(vlan_tag); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); @@ -198,7 +198,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.vf_id = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); @@ -363,10 +363,11 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) } /* only call by PF to reserve resources for VF */ -static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) +static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; + u16 vf_ring_grps; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; @@ -378,18 +379,18 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) * be removed once new HWRM provides HW ring groups capability in * hwrm_func_qcap. 
*/ - vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs); - vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs; + vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs); + vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs; /* TODO: restore this logic below once the WA above is removed */ - /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */ - vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs; + /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */ + vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; if (bp->flags & BNXT_FLAG_AGG_RINGS) - vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) / - *num_vfs; + vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) / + num_vfs; else - vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) / - *num_vfs; - vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs; + vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; + vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; + vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | FUNC_CFG_REQ_ENABLES_MRU | @@ -399,7 +400,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS); + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; req.mru = cpu_to_le16(mtu); @@ -409,6 +411,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); req.num_tx_rings = cpu_to_le16(vf_tx_rings); req.num_rx_rings = cpu_to_le16(vf_rx_rings); + req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.num_l2_ctxs = cpu_to_le16(4); vf_vnics = 1; @@ -417,22 +420,24 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); mutex_lock(&bp->hwrm_cmd_lock); - for (i = 0; i < *num_vfs; i++) { - req.vf_id = cpu_to_le16(pf->first_vf_id + i); + for (i = 0; i < num_vfs; i++) { + req.fid = cpu_to_le16(pf->first_vf_id + i); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) break; - bp->pf.active_vfs = i + 1; - bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id); + pf->active_vfs = i + 1; + pf->vf[i].fw_fid = le16_to_cpu(req.fid); } mutex_unlock(&bp->hwrm_cmd_lock); if (!rc) { - bp->pf.max_pf_tx_rings = bp->tx_nr_rings; - if (bp->flags & BNXT_FLAG_AGG_RINGS) - bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2; - else - bp->pf.max_pf_rx_rings = bp->rx_nr_rings; + pf->max_tx_rings -= vf_tx_rings * num_vfs; + pf->max_rx_rings -= vf_rx_rings * num_vfs; + pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; + pf->max_cp_rings -= vf_cp_rings * num_vfs; + pf->max_rsscos_ctxs -= num_vfs; + pf->max_stat_ctxs -= vf_stat_ctx * num_vfs; + pf->max_vnics -= vf_vnics * num_vfs; } return rc; } @@ -492,7 +497,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) goto err_out1; /* Reserve resources for VFs */ - rc = bnxt_hwrm_func_cfg(bp, num_vfs); + rc = bnxt_hwrm_func_cfg(bp, *num_vfs); if (rc) goto err_out2; @@ -536,8 +541,8 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); bp->pf.active_vfs = 0; - bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings; - bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings; + /* Reclaim all resources for the PF. 
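
A worked illustration, with hypothetical numbers, of the even per-VF split performed in bnxt_hwrm_func_cfg() above; example_per_vf_share() is not part of the driver.

static u16 example_per_vf_share(u16 pf_max, u16 pf_used, int num_vfs)
{
	/* resources left after the PF's own usage, divided evenly per VF */
	return (pf_max - pf_used) / num_vfs;
}

For instance, with max_tx_rings = 128, tx_nr_rings = 8 and num_vfs = 8, each VF is offered example_per_vf_share(128, 8, 8) = 15 TX rings, and on success pf->max_tx_rings is reduced by 15 * 8 = 120, matching the pf->max_* adjustments in the hunk above.
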
*/ + bnxt_hwrm_func_qcaps(bp); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) @@ -595,6 +600,7 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); req.encap_resp_len = cpu_to_le16(msg_size); req.encap_resp_addr = encap_resp_addr; req.encap_resp_cmpl_ring = encap_resp_cpr; @@ -629,6 +635,7 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); mutex_lock(&bp->hwrm_cmd_lock); @@ -660,6 +667,7 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); mutex_lock(&bp->hwrm_cmd_lock); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 17f017ab4dac..b15a60d787c7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2041,11 +2041,11 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); } ring = &priv->tx_rings[DESC_INDEX]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); } static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 8bdfe53754ba..0d775964b060 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -401,9 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; - else - priv->mii_bus->irq[phydev->addr] = PHY_POLL; + priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; return 0; } @@ -477,12 +475,6 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", priv->pdev->name, priv->pdev->id); - bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!bus->irq) { - mdiobus_free(priv->mii_bus); - return -ENOMEM; - } - return 0; } @@ -581,7 +573,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) } if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdio->phy_map[pd->phy_address]; + phydev = mdiobus_get_phy(mdio, pd->phy_address); else phydev = phy_find_first(mdio); @@ -648,7 +640,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; } @@ -659,6 +650,5 @@ void bcmgenet_mii_exit(struct net_device *dev) of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c 
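
A generic sketch of the phylib conversion repeated through this series (the genet/bcmmii hunks above, and sb1250-mac/tg3 below): the removed kmalloc()/kfree() of mii_bus->irq reflects that the MDIO core now allocates that table itself with entries defaulting to PHY_POLL, direct phy_map[] lookups become mdiobus_get_phy(), and hand-rolled "attached PHY driver" printouts become phydev_name()/phy_attached_info(). example_mii_probe() and example_adjust_link() are hypothetical stand-ins for a driver's own callbacks.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *dev)
{
	/* link-change callback required by phy_connect() */
}

static int example_mii_probe(struct net_device *dev, struct mii_bus *bus,
			     int addr)
{
	struct phy_device *phydev = mdiobus_get_phy(bus, addr);

	if (!phydev)
		phydev = phy_find_first(bus);
	if (!phydev)
		return -ENODEV;

	phydev = phy_connect(dev, phydev_name(phydev), example_adjust_link,
			     PHY_INTERFACE_MODE_RGMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* common replacement for the per-driver attach message */
	phy_attached_info(phydev);
	return 0;
}
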
b/drivers/net/ethernet/broadcom/sb1250-mac.c index f557a2aaec23..eacc559679bf 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -238,7 +238,6 @@ struct sbmac_softc { struct napi_struct napi; struct phy_device *phy_dev; /* the associated PHY device */ struct mii_bus *mii_bus; /* the MII bus */ - int phy_irq[PHY_MAX_ADDR]; spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ @@ -2250,9 +2249,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) sc->mii_bus->priv = sc; sc->mii_bus->read = sbmac_mii_read; sc->mii_bus->write = sbmac_mii_write; - sc->mii_bus->irq = sc->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - sc->mii_bus->irq[i] = SBMAC_PHY_INT; sc->mii_bus->parent = &pldev->dev; /* @@ -2358,20 +2354,15 @@ static int sbmac_mii_probe(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); struct phy_device *phy_dev; - int i; - for (i = 0; i < PHY_MAX_ADDR; i++) { - phy_dev = sc->mii_bus->phy_map[i]; - if (phy_dev) - break; - } + phy_dev = phy_find_first(sc->mii_bus); if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO; } - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, - PHY_INTERFACE_MODE_GMII); + phy_dev = phy_connect(dev, dev_name(&phy_dev->mdio.dev), + &sbmac_mii_poll, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev); @@ -2388,11 +2379,10 @@ static int sbmac_mii_probe(struct net_device *dev) SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phy_dev->drv->name, - dev_name(&phy_dev->dev), phy_dev->irq); + phy_attached_info(phy_dev); + + phy_dev->advertising = phy_dev->supported; sc->phy_dev = phy_dev; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 79789d8e52da..9293675df7ba 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1406,7 +1406,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) u32 val; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: @@ -1538,10 +1538,6 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); - tp->mdio_bus->irq = &tp->mdio_irq[0]; - - for (i = 0; i < PHY_MAX_ADDR; i++) - tp->mdio_bus->irq[i] = PHY_POLL; /* The bus registration will look for all the PHYs on the mdio bus. 
* Unfortunately, it does not ensure the PHY is powered up before @@ -1558,7 +1554,7 @@ static int tg3_mdio_init(struct tg3 *tp) return i; } - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1968,7 +1964,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; + autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; else autoneg = tp->link_config.autoneg; @@ -2004,7 +2000,7 @@ static void tg3_adjust_link(struct net_device *dev) u8 oldflowctrl, linkmesg = 0; u32 mac_mode, lcl_adv, rmt_adv; struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); spin_lock_bh(&tp->lock); @@ -2093,10 +2089,10 @@ static int tg3_phy_init(struct tg3 *tp) /* Bring the PHY back to a known state. */ tg3_bmcr_reset(tp); - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); /* Attach the MAC to the PHY. */ - phydev = phy_connect(tp->dev, dev_name(&phydev->dev), + phydev = phy_connect(tp->dev, phydev_name(phydev), tg3_adjust_link, phydev->interface); if (IS_ERR(phydev)) { dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); @@ -2120,7 +2116,7 @@ static int tg3_phy_init(struct tg3 *tp) SUPPORTED_Asym_Pause); break; default: - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); return -EINVAL; } @@ -2128,6 +2124,8 @@ static int tg3_phy_init(struct tg3 *tp) phydev->advertising = phydev->supported; + phy_attached_info(phydev); + return 0; } @@ -2138,7 +2136,7 @@ static void tg3_phy_start(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2158,13 +2156,13 @@ static void tg3_phy_stop(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } static void tg3_phy_fini(struct tg3 *tp) { if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } @@ -4048,7 +4046,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) struct phy_device *phydev; u32 phyid, advertising; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -12076,7 +12074,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_ethtool_gset(phydev, cmd); } @@ -12143,7 +12141,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 
return phy_ethtool_sset(phydev, cmd); } @@ -12298,7 +12296,7 @@ static int tg3_nway_reset(struct net_device *dev) if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); + r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); } else { u32 bmcr; @@ -12416,7 +12414,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam u32 newadv; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -13926,7 +13924,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); return phy_mii_ioctl(phydev, ifr, cmd); } @@ -17898,13 +17896,7 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_bus_string(tp, str), dev->dev_addr); - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[tp->phy_addr]; - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); - } else { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) { char *ethtype; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 31c9f8295953..3b5e98ecba00 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3254,7 +3254,6 @@ struct tg3 { int pcie_readrq; struct mii_bus *mdio_bus; - int mdio_irq[PHY_MAX_ADDR]; int old_link; u8 phy_addr; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 169059c92f80..c56347536f6b 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -28,6 +29,7 @@ #include <linux/phy.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_net.h> @@ -439,12 +441,6 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->parent = &bp->dev->dev; pdata = dev_get_platdata(&bp->pdev->dev); - bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (!bp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; @@ -469,9 +465,6 @@ static int macb_mii_init(struct macb *bp) goto err_out_unregister_bus; } } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (pdata) bp->mii_bus->phy_mask = pdata->phy_mask; @@ -479,7 +472,7 @@ static int macb_mii_init(struct macb *bp) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; err = macb_mii_probe(bp->dev); if (err) @@ -489,8 +482,6 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); -err_out_free_mdio_irq: - kfree(bp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(bp->mii_bus); err_out: @@ -2122,7 +2113,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs_buff[10] = 
macb_tx_dma(&bp->queues[0], tail); regs_buff[11] = macb_tx_dma(&bp->queues[0], head); - regs_buff[12] = macb_or_gem_readl(bp, USRIO); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) + regs_buff[12] = macb_or_gem_readl(bp, USRIO); if (macb_is_gem(bp)) { regs_buff[13] = gem_readl(bp, DMACFG); } @@ -2401,19 +2393,21 @@ static int macb_init(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; - val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) - val = GEM_BIT(RGMII); - else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(RMII); - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) - val = MACB_BIT(MII); + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); - if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) - val |= MACB_BIT(CLKEN); + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); - macb_or_gem_writel(bp, USRIO, val); + macb_or_gem_writel(bp, USRIO, val); + } /* Set MII management clock divider */ val = macb_mdc_clk_div(bp); @@ -2776,6 +2770,11 @@ static const struct macb_config emac_config = { .init = at91ether_init, }; +static const struct macb_config np4_config = { + .caps = MACB_CAPS_USRIO_DISABLED, + .clk_init = macb_clk_init, + .init = macb_init, +}; static const struct macb_config zynqmp_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, @@ -2796,6 +2795,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,at32ap7000-macb" }, { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, { .compatible = "cdns,macb" }, + { .compatible = "cdns,np4-macb", .data = &np4_config }, { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, @@ -2817,6 +2817,7 @@ static int macb_probe(struct platform_device *pdev) = macb_clk_init; int (*init)(struct platform_device *) = macb_init; struct device_node *np = pdev->dev.of_node; + struct device_node *phy_node; const struct macb_config *macb_config = NULL; struct clk *pclk, *hclk, *tx_clk; unsigned int queue_mask, num_queues; @@ -2904,6 +2905,16 @@ static int macb_probe(struct platform_device *pdev) else macb_get_hwaddr(bp); + /* Power up the PHY if there is a GPIO reset */ + phy_node = of_get_next_available_child(np, NULL); + if (phy_node) { + int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); + if (gpio_is_valid(gpio)) + bp->reset_gpio = gpio_to_desc(gpio); + gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH); + } + of_node_put(phy_node); + err = of_get_phy_mode(np); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); @@ -2937,8 +2948,7 @@ static int macb_probe(struct platform_device *pdev) dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; @@ -2968,8 +2978,11 @@ static int macb_remove(struct platform_device *pdev) if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); - kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); + + /* Shutdown 
the PHY if there is a GPIO reset */ + gpiod_set_value(bp->reset_gpio, GPIOD_OUT_LOW); + unregister_netdev(dev); clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d83b0db77821..0d4ecfcd60b7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -400,6 +400,7 @@ #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 +#define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -829,6 +830,7 @@ struct macb { unsigned int dma_burst_length; phy_interface_t phy_interface; + struct gpio_desc *reset_gpio; /* AT91RM9200 transmit */ struct sk_buff *skb; /* holds skb until xmit interrupt completes */ diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 39ca6744a4e6..688828865c48 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -265,6 +265,7 @@ struct nicvf { u8 tns_mode:1; u8 sqs_mode:1; u8 loopback_supported:1; + bool hw_tso; u16 mtu; struct queue_set *qs; #define MAX_SQS_PER_VF_SINGLE_NODE 5 @@ -489,6 +490,11 @@ static inline int nic_get_node_id(struct pci_dev *pdev) return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK); } +static inline bool pass1_silicon(struct pci_dev *pdev) +{ + return pdev->revision < 8; +} + int nicvf_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); int nicvf_open(struct net_device *netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 5f24d11cb16a..4dded90076c8 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -54,11 +54,6 @@ struct nicpf { bool irq_allocated[NIC_PF_MSIX_VECTORS]; }; -static inline bool pass1_silicon(struct nicpf *nic) -{ - return nic->pdev->revision < 8; -} - /* Supported devices */ static const struct pci_device_id nic_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, @@ -122,7 +117,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) * when PF writes to MBOX(1), in next revisions when * PF writes to MBOX(0) */ - if (pass1_silicon(nic)) { + if (pass1_silicon(nic->pdev)) { /* see the comment for nic_reg_write()/nic_reg_read() * functions above */ @@ -397,7 +392,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ /* Leave RSS_SIZE as '0' to disable RSS */ - if (pass1_silicon(nic)) { + if (pass1_silicon(nic->pdev)) { nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), (vnic << 24) | (padd << 16) | (rssi_base + rssi)); @@ -467,7 +462,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) } cpi_base = nic->cpi_base[cfg->vf_id]; - if (pass1_silicon(nic)) + if (pass1_silicon(nic->pdev)) idx_addr = NIC_PF_CPI_0_2047_CFG; else idx_addr = NIC_PF_MPI_0_2047_CFG; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index dde8dc720cd3..c24cb2a86a42 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -525,14 +525,22 @@ static void nicvf_snd_pkt_handler(struct net_device 
*netdev, __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); - nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; - /* For TSO offloaded packets only one head SKB needs to be freed */ + /* For TSO offloaded packets only one SQE will have a valid SKB */ if (skb) { + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); dev_consume_skb_any(skb); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; + } else { + /* In case of HW TSO, HW sends a CQE for each segment of a TSO + * packet instead of a single CQE for the whole TSO packet + * transmitted. Each of this CQE points to the same SQE, so + * avoid freeing same SQE multiple times. + */ + if (!nic->hw_tso) + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } } @@ -1549,6 +1557,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + if (!pass1_silicon(nic->pdev)) + nic->hw_tso = true; + netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 206b6a71a545..d0d1b5490061 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -18,14 +18,6 @@ #include "q_struct.h" #include "nicvf_queues.h" -struct rbuf_info { - struct page *page; - void *data; - u64 offset; -}; - -#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) - /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, u64 reg, int bit_pos, int bits, int val) @@ -86,8 +78,6 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - u64 data; - struct rbuf_info *rinfo; int order = get_order(buf_len); /* Check if request can be accomodated in previous allocated page */ @@ -113,46 +103,28 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page_offset = 0; } - data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; - - /* Align buffer addr to cache line i.e 128 bytes */ - rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); - /* Save page address for reference updation */ - rinfo->page = nic->rb_page; - /* Store start address for later retrieval */ - rinfo->data = (void *)data; - /* Store alignment offset */ - rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); + *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); - data += rinfo->offset; - - /* Give next aligned address to hw for DMA */ - *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); return 0; } -/* Retrieve actual buffer start address and build skb for received packet */ +/* Build skb around receive buffer */ static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, u64 rb_ptr, int len) { + void *data; struct sk_buff *skb; - struct rbuf_info *rinfo; - rb_ptr = (u64)phys_to_virt(rb_ptr); - /* Get buffer start address and alignment offset */ - rinfo = GET_RBUF_INFO(rb_ptr); + data = phys_to_virt(rb_ptr); /* Now build an skb to give to stack */ - skb = build_skb(rinfo->data, RCV_FRAG_LEN); + skb = build_skb(data, RCV_FRAG_LEN); if (!skb) { - put_page(rinfo->page); + put_page(virt_to_page(data)); return NULL; } - /* Set correct skb->data */ - skb_reserve(skb, rinfo->offset + 
NICVF_RCV_BUF_ALIGN_BYTES); - - prefetch((void *)rb_ptr); + prefetch(skb->data); return skb; } @@ -196,7 +168,6 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) int head, tail; u64 buf_addr; struct rbdr_entry_t *desc; - struct rbuf_info *rinfo; if (!rbdr) return; @@ -212,16 +183,14 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; - rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); - put_page(rinfo->page); + put_page(virt_to_page(phys_to_virt(buf_addr))); head++; head &= (rbdr->dmem.q_len - 1); } /* Free SKB of tail desc */ desc = GET_RBDR_DESC(rbdr, tail); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; - rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); - put_page(rinfo->page); + put_page(virt_to_page(phys_to_virt(buf_addr))); /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); @@ -330,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic, return err; cq->desc = cq->dmem.base; - cq->thresh = CMP_QUEUE_CQE_THRESH; + cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH; nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; return 0; @@ -956,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) { int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; - if (skb_shinfo(skb)->gso_size) { + if (skb_shinfo(skb)->gso_size && !nic->hw_tso) { subdesc_cnt = nicvf_tso_count_subdescs(skb); return subdesc_cnt; } @@ -971,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) * First subdescriptor for every send descriptor. */ static inline void -nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, +nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, int subdesc_cnt, struct sk_buff *skb, int len) { int proto; @@ -1007,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, break; } } + + if (nic->hw_tso && skb_shinfo(skb)->gso_size) { + hdr->tso = 1; + hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb); + hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; + /* For non-tunneled pkts, point this to L2 ethertype */ + hdr->inner_l3_offset = skb_network_offset(skb) - 2; + nic->drv_stats.tx_tso++; + } } /* SQ GATHER subdescriptor @@ -1076,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, data_left -= size; tso_build_data(skb, &tso, size); } - nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, + nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry, seg_subdescs - 1, skb, seg_len); sq->skbuff[hdr_qentry] = (u64)NULL; qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1129,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) qentry = nicvf_get_sq_desc(sq, subdesc_cnt); /* Check if its a TSO packet */ - if (skb_shinfo(skb)->gso_size) + if (skb_shinfo(skb)->gso_size && !nic->hw_tso) return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); /* Add SQ header subdesc */ - nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); + nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, + skb, skb->len); /* Add SQ gather subdescs */ qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1234,153 +1213,93 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) return skb; } -/* Enable interrupt */ -void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +static u64 nicvf_int_type_to_mask(int int_type, int q_idx) { u64 reg_val; - reg_val = 
nicvf_reg_read(nic, NIC_VF_ENA_W1S); - switch (int_type) { case NICVF_INTR_CQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: - reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: - reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: - reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT); break; default: - netdev_err(nic->netdev, - "Failed to enable interrupt: unknown type\n"); - break; + reg_val = 0; } - nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); + return reg_val; +} + +/* Enable interrupt */ +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); + + if (!mask) { + netdev_dbg(nic->netdev, + "Failed to enable interrupt: unknown type\n"); + return; + } + nicvf_reg_write(nic, NIC_VF_ENA_W1S, + nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask); } /* Disable interrupt */ void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val = 0; + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); - switch (int_type) { - case NICVF_INTR_CQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); - break; - case NICVF_INTR_TCP_TIMER: - reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); - break; - case NICVF_INTR_MBOX: - reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); - break; - case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); - break; - default: - netdev_err(nic->netdev, + if (!mask) { + netdev_dbg(nic->netdev, "Failed to disable interrupt: unknown type\n"); - break; + return; } - nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); + nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask); } /* Clear interrupt */ void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val = 0; + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); - switch (int_type) { - case NICVF_INTR_CQ: - reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); - break; - case NICVF_INTR_TCP_TIMER: - reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); - break; - case NICVF_INTR_MBOX: - reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); - break; - case NICVF_INTR_QS_ERR: - reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); - break; - default: - netdev_err(nic->netdev, + if (!mask) { + netdev_dbg(nic->netdev, "Failed to clear interrupt: unknown type\n"); - break; + return; } - nicvf_reg_write(nic, NIC_VF_INT, reg_val); + nicvf_reg_write(nic, NIC_VF_INT, mask); 
} /* Check if interrupt is enabled */ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) { - u64 reg_val; - u64 mask = 0xff; - - reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); - - switch (int_type) { - case NICVF_INTR_CQ: - mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); - break; - case NICVF_INTR_SQ: - mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); - break; - case NICVF_INTR_RBDR: - mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); - break; - case NICVF_INTR_PKT_DROP: - mask = NICVF_INTR_PKT_DROP_MASK; - break; - case NICVF_INTR_TCP_TIMER: - mask = NICVF_INTR_TCP_TIMER_MASK; - break; - case NICVF_INTR_MBOX: - mask = NICVF_INTR_MBOX_MASK; - break; - case NICVF_INTR_QS_ERR: - mask = NICVF_INTR_QS_ERR_MASK; - break; - default: - netdev_err(nic->netdev, + u64 mask = nicvf_int_type_to_mask(int_type, q_idx); + /* If interrupt type is unknown, we treat it disabled. */ + if (!mask) { + netdev_dbg(nic->netdev, "Failed to check interrupt enable: unknown type\n"); - break; + return 0; } - return (reg_val & mask); + return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S); } void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 033e8306e91c..c5030a7f213a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -75,7 +75,7 @@ */ #define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) -#define CMP_QUEUE_CQE_THRESH 0 +#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ #define RBDR_SIZE RBDR_SIZE0 @@ -83,10 +83,8 @@ #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) #define RBDR_THRESH (RCV_BUF_COUNT / 2) #define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ -#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ - (NICVF_RCV_BUF_ALIGN_BYTES * 2)) -#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES +#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) @@ -108,10 +106,6 @@ #define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ #define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) -#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\ - (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES) -#define NICVF_RCV_BUF_ALIGN_LEN(X)\ - (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X) /* Queue enable/disable */ #define NICVF_SQ_EN BIT_ULL(19) diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h index 3c1de97b1add..9e6d9876bfd0 100644 --- a/drivers/net/ethernet/cavium/thunder/q_struct.h +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -545,25 +545,28 @@ struct sq_hdr_subdesc { u64 subdesc_cnt:8; u64 csum_l4:2; u64 csum_l3:1; - u64 rsvd0:5; + u64 csum_inner_l4:2; + u64 csum_inner_l3:1; + u64 rsvd0:2; u64 l4_offset:8; u64 l3_offset:8; u64 rsvd1:4; u64 tot_len:20; /* W0 */ - u64 tso_sdc_cont:8; - u64 tso_sdc_first:8; - u64 tso_l4_offset:8; - u64 tso_flags_last:12; - u64 tso_flags_first:12; - u64 rsvd2:2; + u64 rsvd2:24; + u64 inner_l4_offset:8; + u64 inner_l3_offset:8; + u64 tso_start:8; + u64 rsvd3:2; u64 tso_max_paysize:14; /* W1 */ #elif defined(__LITTLE_ENDIAN_BITFIELD) u64 tot_len:20; u64 rsvd1:4; u64 l3_offset:8; u64 l4_offset:8; - u64 
rsvd0:5; + u64 rsvd0:2; + u64 csum_inner_l3:1; + u64 csum_inner_l4:2; u64 csum_l3:1; u64 csum_l4:2; u64 subdesc_cnt:8; @@ -574,12 +577,11 @@ struct sq_hdr_subdesc { u64 subdesc_type:4; /* W0 */ u64 tso_max_paysize:14; - u64 rsvd2:2; - u64 tso_flags_first:12; - u64 tso_flags_last:12; - u64 tso_l4_offset:8; - u64 tso_sdc_first:8; - u64 tso_sdc_cont:8; /* W1 */ + u64 rsvd3:2; + u64 tso_start:8; + u64 inner_l3_offset:8; + u64 inner_l4_offset:8; + u64 rsvd2:24; /* W1 */ #endif }; diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index a79813a17b6e..4d187f22c48b 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -65,13 +65,14 @@ config CHELSIO_T3 will be called cxgb3. config CHELSIO_T4 - tristate "Chelsio Communications T4/T5 Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapter and T5 based 40Gb Ethernet adapter. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. @@ -85,7 +86,7 @@ config CHELSIO_T4 will be called cxgb4. config CHELSIO_T4_DCB - bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards" + bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards" default n depends on CHELSIO_T4 && DCB ---help--- @@ -107,12 +108,12 @@ config CHELSIO_T4_FCOE If unsure, say N. config CHELSIO_T4VF - tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support" depends on PCI ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual - Functions. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters with PCI-E SR-IOV Virtual Functions. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. 
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h index a4d2a4c08d3f..bf43da6c6a63 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cphy.h +++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h @@ -137,7 +137,7 @@ static inline int simple_mdio_write(struct cphy *cphy, int reg, /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, struct net_device *dev, - int phy_addr, struct cphy_ops *phy_ops, + int phy_addr, const struct cphy_ops *phy_ops, const struct mdio_ops *mdio_ops) { struct adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c index 71018a4fdf15..76ce6e538326 100644 --- a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c @@ -337,7 +337,7 @@ static void mv88e1xxx_destroy(struct cphy *cphy) kfree(cphy); } -static struct cphy_ops mv88e1xxx_ops = { +static const struct cphy_ops mv88e1xxx_ops = { .destroy = mv88e1xxx_destroy, .reset = mv88e1xxx_reset, .interrupt_enable = mv88e1xxx_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c index d0cf611551a1..7ddb301bcba0 100644 --- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c +++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c @@ -195,7 +195,7 @@ static void mv88x201x_destroy(struct cphy *cphy) kfree(cphy); } -static struct cphy_ops mv88x201x_ops = { +static const struct cphy_ops mv88x201x_ops = { .destroy = mv88x201x_destroy, .reset = mv88x201x_reset, .interrupt_enable = mv88x201x_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c index a683fd3bb624..d546f46c8ef7 100644 --- a/drivers/net/ethernet/chelsio/cxgb/my3126.c +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -154,7 +154,7 @@ static void my3126_destroy(struct cphy *cphy) kfree(cphy); } -static struct cphy_ops my3126_ops = { +static const struct cphy_ops my3126_ops = { .destroy = my3126_destroy, .reset = my3126_reset, .interrupt_enable = my3126_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index ec5e05052d99..eb462d7db427 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -570,7 +570,7 @@ static void pm3393_destroy(struct cmac *cmac) kfree(cmac); } -static struct cmac_ops pm3393_ops = { +static const struct cmac_ops pm3393_ops = { .destroy = pm3393_destroy, .reset = pm3393_reset, .interrupt_enable = pm3393_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index b0cb388f5e12..6f30b6f78553 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -666,7 +666,7 @@ static void mac_destroy(struct cmac *mac) kfree(mac); } -static struct cmac_ops vsc7326_ops = { +static const struct cmac_ops vsc7326_ops = { .destroy = mac_destroy, .reset = mac_reset, .interrupt_handler = mac_intr_handler, diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c index 2028da95afa1..dadf11e3dddb 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c +++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c @@ -198,7 +198,7 @@ static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, return 0; } -static struct cphy_ops ael1002_ops = { +static const struct cphy_ops 
ael1002_ops = { .reset = ael1002_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, @@ -224,7 +224,7 @@ static int ael1006_reset(struct cphy *phy, int wait) return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait); } -static struct cphy_ops ael1006_ops = { +static const struct cphy_ops ael1006_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, @@ -495,7 +495,7 @@ static int ael2005_intr_handler(struct cphy *phy) return ret ? ret : cphy_cause_link_change; } -static struct cphy_ops ael2005_ops = { +static const struct cphy_ops ael2005_ops = { .reset = ael2005_reset, .intr_enable = ael2005_intr_enable, .intr_disable = ael2005_intr_disable, @@ -801,7 +801,7 @@ static int ael2020_intr_handler(struct cphy *phy) return ret ? ret : cphy_cause_link_change; } -static struct cphy_ops ael2020_ops = { +static const struct cphy_ops ael2020_ops = { .reset = ael2020_reset, .intr_enable = ael2020_intr_enable, .intr_disable = ael2020_intr_disable, @@ -856,7 +856,7 @@ static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, return 0; } -static struct cphy_ops qt2045_ops = { +static const struct cphy_ops qt2045_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, @@ -921,7 +921,7 @@ static int xaui_direct_power_down(struct cphy *phy, int enable) return 0; } -static struct cphy_ops xaui_direct_ops = { +static const struct cphy_ops xaui_direct_ops = { .reset = xaui_direct_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c index 341b7ef1508f..6af5d200e44f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c +++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c @@ -247,7 +247,7 @@ static int aq100x_get_link_status(struct cphy *phy, int *link_ok, return 0; } -static struct cphy_ops aq100x_ops = { +static const struct cphy_ops aq100x_ops = { .reset = aq100x_reset, .intr_enable = aq100x_intr_enable, .intr_disable = aq100x_intr_disable, diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h index 442480982d3f..1bd7d89666c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -575,7 +575,7 @@ static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg, /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, struct adapter *adapter, - int phy_addr, struct cphy_ops *phy_ops, + int phy_addr, const struct cphy_ops *phy_ops, const struct mdio_ops *mdio_ops, unsigned int caps, const char *desc) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 8f7aa53a4c4b..60908eab3b3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -701,15 +701,16 @@ static ssize_t attr_store(struct device *d, ssize_t(*set) (struct net_device *, unsigned int), unsigned int min_val, unsigned int max_val) { - char *endp; ssize_t ret; unsigned int val; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val < min_val || val > max_val) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val < min_val || val > max_val) return -EINVAL; rtnl_lock(); @@ -829,14 +830,15 @@ static ssize_t tm_attr_store(struct device *d, struct port_info *pi = 
netdev_priv(to_net_dev(d)); struct adapter *adap = pi->adapter; unsigned int val; - char *endp; ssize_t ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val > 10000000) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val > 10000000) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index a22768c94200..ee04caa6c4d8 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -709,11 +709,21 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) return ret; } - p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); - p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); - p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); - p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); - p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); + if (ret) + return ret; + ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); + if (ret) + return ret; + ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); + if (ret) + return ret; memcpy(p->sn, vpd.sn_data, SERNUM_LEN); /* Old eeproms didn't have port information */ @@ -723,8 +733,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) } else { p->port_type[0] = hex_to_bin(vpd.port0_data[0]); p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); - p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); + ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); + if (ret) + return ret; + ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); + if (ret) + return ret; } ret = hex2bin(p->eth_base, vpd.na_data, 6); diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c index 4f9a1c2724f4..8638ad42bf60 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c +++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c @@ -336,7 +336,7 @@ static int vsc8211_intr_handler(struct cphy *cphy) return cphy_cause; } -static struct cphy_ops vsc8211_ops = { +static const struct cphy_ops vsc8211_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, @@ -350,7 +350,7 @@ static struct cphy_ops vsc8211_ops = { .power_down = vsc8211_power_down, }; -static struct cphy_ops vsc8211_fiber_ops = { +static const struct cphy_ops vsc8211_fiber_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 11dd91e4db56..7ad43af6bde1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -118,6 +118,11 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); + dev_err(adap->pdev_dev, + "CLIP FW cmd failed with error %d, " + "Connections using %pI6c wont be " + "offloaded", + ret, ce->addr6.sin6_addr.s6_addr); return ret; } } else { @@ -127,6 +132,9 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) } } else { write_unlock_bh(&ctbl->lock); + dev_info(adap->pdev_dev, "CLIP 
table overflow, " + "Connections using %pI6c wont be offloaded", + (void *)lip); return -ENOMEM; } write_unlock_bh(&ctbl->lock); @@ -146,6 +154,9 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) int hash; int ret = -1; + if (!ctbl) + return; + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 55a47de544ea..ec6e849676c1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -301,6 +301,8 @@ struct devlog_params { /* Stores chip specific parameters */ struct arch_specific_params { u8 nchan; + u8 pm_stats_cnt; + u8 cng_ch_bits_log; /* congestion channel map bits width */ u16 mps_rplc_size; u16 vfcount; u32 sge_fl_db; @@ -398,11 +400,10 @@ struct link_config { enum { MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ - MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ - MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */ }; enum { @@ -420,7 +421,7 @@ enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES - + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, + + MAX_RDMA_CIQS + INGQ_EXTRAS, }; struct adapter; @@ -483,6 +484,8 @@ struct sge_fl { /* SGE free-buffer queue state */ unsigned int pidx; /* producer index */ unsigned long alloc_failed; /* # of times buffer allocation failed */ unsigned long large_alloc_failed; + unsigned long mapping_err; /* # of RX Buffer DMA Mapping failures */ + unsigned long low; /* # of times momentarily starving */ unsigned long starving; /* RO fields */ unsigned int cntxt_id; /* SGE context id for the free list */ @@ -618,6 +621,7 @@ struct sge_ofld_txq { /* state for an SGE offload Tx queue */ struct adapter *adap; struct sk_buff_head sendq; /* list of backpressured packets */ struct tasklet_struct qresume_tsk; /* restarts the queue */ + bool service_ofldq_running; /* service_ofldq() is processing sendq */ u8 full; /* the Tx ring is full */ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ } ____cacheline_aligned_in_smp; @@ -636,7 +640,7 @@ struct sge { struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; - struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS]; struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; @@ -647,10 +651,10 @@ struct sge { u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethtxq_rover; /* Tx queue to clean up next */ - u16 ofldqsets; /* # of active offload queue sets */ + u16 iscsiqsets; /* # of active iSCSI queue sets */ u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ - u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 rdma_rxq[MAX_RDMA_QUEUES]; u16 rdma_ciq[MAX_RDMA_CIQS]; u16 timer_val[SGE_NTIMERS]; @@ -676,7 +680,7 @@ struct sge { }; #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) -#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) 
+#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++) #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) #define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++) @@ -1253,6 +1257,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver); int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force); +int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); int t4_get_fw_version(struct adapter *adapter, u32 *vers); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 4269944c5db5..e6a4072b494b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -757,8 +757,8 @@ static int pm_stats_show(struct seq_file *seq, void *v) }; int i; - u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS]; - u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS]; + u32 tx_cnt[T6_PM_NSTATS], rx_cnt[T6_PM_NSTATS]; + u64 tx_cyc[T6_PM_NSTATS], rx_cyc[T6_PM_NSTATS]; struct adapter *adap = seq->private; t4_pmtx_get_stats(adap, tx_cnt, tx_cyc); @@ -773,6 +773,32 @@ static int pm_stats_show(struct seq_file *seq, void *v) for (i = 0; i < PM_NSTATS - 1; i++) seq_printf(seq, "%-13s %10u %20llu\n", rx_pm_stats[i], rx_cnt[i], rx_cyc[i]); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + /* In T5 the granularity of the total wait is too fine. + * It is not useful as it reaches the max value too fast. + * Hence display this Input FIFO wait for T6 onwards. + */ + seq_printf(seq, "%13s %10s %20s\n", + " ", "Total wait", "Total Occupancy"); + seq_printf(seq, "Tx FIFO wait %10u %20llu\n", + tx_cnt[i], tx_cyc[i]); + seq_printf(seq, "Rx FIFO wait %10u %20llu\n", + rx_cnt[i], rx_cyc[i]); + + /* Skip index 6 as there is nothing useful ihere */ + i += 2; + + /* At index 7, a new stat for read latency (count, total wait) + * is added. 
+ */ + seq_printf(seq, "%13s %10s %20s\n", + " ", "Reads", "Total wait"); + seq_printf(seq, "Tx latency %10u %20llu\n", + tx_cnt[i], tx_cyc[i]); + seq_printf(seq, "Rx latency %10u %20llu\n", + rx_cnt[i], rx_cyc[i]); + } return 0; } @@ -1559,25 +1585,35 @@ static int mps_tcam_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); - if (v == SEQ_START_TOKEN) { - if (adap->params.arch.mps_rplc_size > 128) + if (chip_ver > CHELSIO_T5) { seq_puts(seq, "Idx Ethernet address Mask " + " VNI Mask IVLAN Vld " + "DIP_Hit Lookup Port " "Vld Ports PF VF " "Replication " " P0 P1 P2 P3 ML\n"); - else - seq_puts(seq, "Idx Ethernet address Mask " - "Vld Ports PF VF Replication" - " P0 P1 P2 P3 ML\n"); + } else { + if (adap->params.arch.mps_rplc_size > 128) + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF " + "Replication " + " P0 P1 P2 P3 ML\n"); + else + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF Replication" + " P0 P1 P2 P3 ML\n"); + } } else { u64 mask; u8 addr[ETH_ALEN]; - bool replicate; + bool replicate, dip_hit = false, vlan_vld = false; unsigned int idx = (uintptr_t)v - 2; u64 tcamy, tcamx, val; - u32 cls_lo, cls_hi, ctl; + u32 cls_lo, cls_hi, ctl, data2, vnix = 0, vniy = 0; u32 rplc[8] = {0}; + u8 lookup_type = 0, port_num = 0; + u16 ivlan = 0; if (chip_ver > CHELSIO_T5) { /* CtlCmdType - 0: Read, 1: Write @@ -1596,6 +1632,22 @@ static int mps_tcam_show(struct seq_file *seq, void *v) val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamy = DMACH_G(val) << 32; tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); + lookup_type = DATALKPTYPE_G(data2); + /* 0 - Outer header, 1 - Inner header + * [71:48] bit locations are overloaded for + * outer vs. inner lookup types. + */ + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + /* Inner header VNI */ + vniy = ((data2 & DATAVIDH2_F) << 23) | + (DATAVIDH1_G(data2) << 16) | VIDL_G(val); + dip_hit = data2 & DATADIPHIT_F; + } else { + vlan_vld = data2 & DATAVIDH2_F; + ivlan = VIDL_G(val); + } + port_num = DATAPORTNUM_G(data2); /* Read tcamx. Change the control param */ ctl |= CTLXYBITSEL_V(1); @@ -1603,6 +1655,12 @@ static int mps_tcam_show(struct seq_file *seq, void *v) val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamx = DMACH_G(val) << 32; tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + /* Inner header VNI mask */ + vnix = ((data2 & DATAVIDH2_F) << 23) | + (DATAVIDH1_G(data2) << 16) | VIDL_G(val); + } } else { tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); @@ -1662,17 +1720,47 @@ static int mps_tcam_show(struct seq_file *seq, void *v) } tcamxy2valmask(tcamx, tcamy, addr, &mask); - if (chip_ver > CHELSIO_T5) - seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " - "%012llx%3c %#x%4u%4d", - idx, addr[0], addr[1], addr[2], addr[3], - addr[4], addr[5], (unsigned long long)mask, - (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', - PORTMAP_G(cls_hi), - T6_PF_G(cls_lo), - (cls_lo & T6_VF_VALID_F) ? 
- T6_VF_G(cls_lo) : -1); - else + if (chip_ver > CHELSIO_T5) { + /* Inner header lookup */ + if (lookup_type && (lookup_type != DATALKPTYPE_M)) { + seq_printf(seq, + "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx %06x %06x - - %3c" + " 'I' %4x " + "%3c %#x%4u%4d", idx, addr[0], + addr[1], addr[2], addr[3], + addr[4], addr[5], + (unsigned long long)mask, + vniy, vnix, dip_hit ? 'Y' : 'N', + port_num, + (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? + T6_VF_G(cls_lo) : -1); + } else { + seq_printf(seq, + "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx - - ", + idx, addr[0], addr[1], addr[2], + addr[3], addr[4], addr[5], + (unsigned long long)mask); + + if (vlan_vld) + seq_printf(seq, "%4u Y ", ivlan); + else + seq_puts(seq, " - N "); + + seq_printf(seq, + "- %3c %4x %3c %#x%4u%4d", + lookup_type ? 'I' : 'O', port_num, + (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? + T6_VF_G(cls_lo) : -1); + } + } else seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012llx%3c %#x%4u%4d", idx, addr[0], addr[1], addr[2], addr[3], @@ -2245,7 +2333,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); + int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4); int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); @@ -2325,14 +2413,16 @@ do { \ TL("TxMapErr:", mapping_err); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (iscsi_idx < iscsi_entries) { const struct sge_ofld_rxq *rx = - &adap->sge.ofldrxq[iscsi_idx * 4]; + &adap->sge.iscsirxq[iscsi_idx * 4]; const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[iscsi_idx * 4]; - int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx); + int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx); S("QType:", "iSCSI"); T("TxQ ID:", q.cntxt_id); @@ -2359,6 +2449,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (rdma_idx < rdma_entries) { @@ -2388,6 +2480,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (ciq_idx < ciq_entries) { @@ -2448,7 +2542,7 @@ do { \ static int sge_queue_entries(const struct adapter *adap) { return DIV_ROUND_UP(adap->sge.ethqsets, 4) + - DIV_ROUND_UP(adap->sge.ofldqsets, 4) + + DIV_ROUND_UP(adap->sge.iscsiqsets, 4) + DIV_ROUND_UP(adap->sge.rdmaqs, 4) + DIV_ROUND_UP(adap->sge.rdmaciqs, 4) + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index a077f9476daf..7a0b92b2f73c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val) } static const char stats_strings[][ETH_GSTRING_LEN] = { - "tx_octets_ok ", - "tx_frames_ok ", - "tx_broadcast_frames ", - 
"tx_multicast_frames ", - "tx_unicast_frames ", - "tx_error_frames ", - - "tx_frames_64 ", - "tx_frames_65_to_127 ", - "tx_frames_128_to_255 ", - "tx_frames_256_to_511 ", - "tx_frames_512_to_1023 ", - "tx_frames_1024_to_1518 ", - "tx_frames_1519_to_max ", - - "tx_frames_dropped ", - "tx_pause_frames ", - "tx_ppp0_frames ", - "tx_ppp1_frames ", - "tx_ppp2_frames ", - "tx_ppp3_frames ", - "tx_ppp4_frames ", - "tx_ppp5_frames ", - "tx_ppp6_frames ", - "tx_ppp7_frames ", - - "rx_octets_ok ", - "rx_frames_ok ", - "rx_broadcast_frames ", - "rx_multicast_frames ", - "rx_unicast_frames ", - - "rx_frames_too_long ", - "rx_jabber_errors ", - "rx_fcs_errors ", - "rx_length_errors ", - "rx_symbol_errors ", - "rx_runt_frames ", - - "rx_frames_64 ", - "rx_frames_65_to_127 ", - "rx_frames_128_to_255 ", - "rx_frames_256_to_511 ", - "rx_frames_512_to_1023 ", - "rx_frames_1024_to_1518 ", - "rx_frames_1519_to_max ", - - "rx_pause_frames ", - "rx_ppp0_frames ", - "rx_ppp1_frames ", - "rx_ppp2_frames ", - "rx_ppp3_frames ", - "rx_ppp4_frames ", - "rx_ppp5_frames ", - "rx_ppp6_frames ", - "rx_ppp7_frames ", - - "rx_bg0_frames_dropped ", - "rx_bg1_frames_dropped ", - "rx_bg2_frames_dropped ", - "rx_bg3_frames_dropped ", - "rx_bg0_frames_trunc ", - "rx_bg1_frames_trunc ", - "rx_bg2_frames_trunc ", - "rx_bg3_frames_trunc ", - - "tso ", - "tx_csum_offload ", - "rx_csum_good ", - "vlan_extractions ", - "vlan_insertions ", - "gro_packets ", - "gro_merged ", + "tx_octets_ok ", + "tx_frames_ok ", + "tx_broadcast_frames ", + "tx_multicast_frames ", + "tx_unicast_frames ", + "tx_error_frames ", + + "tx_frames_64 ", + "tx_frames_65_to_127 ", + "tx_frames_128_to_255 ", + "tx_frames_256_to_511 ", + "tx_frames_512_to_1023 ", + "tx_frames_1024_to_1518 ", + "tx_frames_1519_to_max ", + + "tx_frames_dropped ", + "tx_pause_frames ", + "tx_ppp0_frames ", + "tx_ppp1_frames ", + "tx_ppp2_frames ", + "tx_ppp3_frames ", + "tx_ppp4_frames ", + "tx_ppp5_frames ", + "tx_ppp6_frames ", + "tx_ppp7_frames ", + + "rx_octets_ok ", + "rx_frames_ok ", + "rx_broadcast_frames ", + "rx_multicast_frames ", + "rx_unicast_frames ", + + "rx_frames_too_long ", + "rx_jabber_errors ", + "rx_fcs_errors ", + "rx_length_errors ", + "rx_symbol_errors ", + "rx_runt_frames ", + + "rx_frames_64 ", + "rx_frames_65_to_127 ", + "rx_frames_128_to_255 ", + "rx_frames_256_to_511 ", + "rx_frames_512_to_1023 ", + "rx_frames_1024_to_1518 ", + "rx_frames_1519_to_max ", + + "rx_pause_frames ", + "rx_ppp0_frames ", + "rx_ppp1_frames ", + "rx_ppp2_frames ", + "rx_ppp3_frames ", + "rx_ppp4_frames ", + "rx_ppp5_frames ", + "rx_ppp6_frames ", + "rx_ppp7_frames ", + + "rx_bg0_frames_dropped ", + "rx_bg1_frames_dropped ", + "rx_bg2_frames_dropped ", + "rx_bg3_frames_dropped ", + "rx_bg0_frames_trunc ", + "rx_bg1_frames_trunc ", + "rx_bg2_frames_trunc ", + "rx_bg3_frames_trunc ", + + "tso ", + "tx_csum_offload ", + "rx_csum_good ", + "vlan_extractions ", + "vlan_insertions ", + "gro_packets ", + "gro_merged ", }; static char adapter_stats_strings[][ETH_GSTRING_LEN] = { @@ -686,7 +686,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d147610a06f..b8a5fb0c32d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -162,19 +162,8 @@ MODULE_FIRMWARE(FW6_FNAME); static uint force_init; module_param(force_init, uint, 0644); -MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); - -/* - * Normally if the firmware we connect to has Configuration File support, we - * use that and only fall back to the old Driver-based initialization if the - * Configuration File fails for some reason. If force_old_init is set, then - * we'll always use the old Driver-based initialization sequence. - */ -static uint force_old_init; - -module_param(force_old_init, uint, 0644); -MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated" - " parameter"); +MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter," + "deprecated parameter"); static int dflt_msg_enable = DFLT_MSG_ENABLE; @@ -196,23 +185,6 @@ module_param(msi, int, 0644); MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); /* - * Queue interrupt hold-off timer values. Queues default to the first of these - * upon creation. - */ -static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; - -module_param_array(intr_holdoff, uint, NULL, 0644); -MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " - "0..4 in microseconds, deprecated parameter"); - -static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; - -module_param_array(intr_cnt, uint, NULL, 0644); -MODULE_PARM_DESC(intr_cnt, - "thresholds 1..3 for queue interrupt packet counters, " - "deprecated parameter"); - -/* * Normally we tell the chip to deliver Ingress Packets into our DMA buffers * offset by 2 bytes in order to have the IP headers line up on 4-byte * boundaries. This is a requirement for many architectures which will throw @@ -226,13 +198,7 @@ MODULE_PARM_DESC(intr_cnt, */ static int rx_dma_offset = 2; -static bool vf_acls; - #ifdef CONFIG_PCI_IOV -module_param(vf_acls, bool, 0644); -MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, " - "deprecated parameter"); - /* Configure the number of PCI-E Virtual Function which are to be instantiated * on SR-IOV Capable Physical Functions. */ @@ -253,12 +219,6 @@ module_param(select_queue, int, 0644); MODULE_PARM_DESC(select_queue, "Select between kernel provided method of selecting or driver method of selecting TX queue. 
Default is kernel method."); -static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC; - -module_param(tp_vlan_pri_map, uint, 0644); -MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, " - "deprecated parameter"); - static struct dentry *cxgb4_debugfs_root; static LIST_HEAD(adapter_list); @@ -766,8 +726,8 @@ static void name_msix_vecs(struct adapter *adap) } /* offload queues */ - for_each_ofldrxq(&adap->sge, i) - snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", + for_each_iscsirxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d", adap->port[0]->name, i); for_each_rdmarxq(&adap->sge, i) @@ -782,7 +742,7 @@ static void name_msix_vecs(struct adapter *adap) static int request_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; + int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; int msi_index = 2; err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, @@ -799,11 +759,11 @@ static int request_msix_queue_irqs(struct adapter *adap) goto unwind; msi_index++; } - for_each_ofldrxq(s, ofldqidx) { + for_each_iscsirxq(s, iscsiqidx) { err = request_irq(adap->msix_info[msi_index].vec, t4_sge_intr_msix, 0, adap->msix_info[msi_index].desc, - &s->ofldrxq[ofldqidx].rspq); + &s->iscsirxq[iscsiqidx].rspq); if (err) goto unwind; msi_index++; @@ -835,9 +795,9 @@ unwind: while (--rdmaqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, &s->rdmarxq[rdmaqidx].rspq); - while (--ofldqidx >= 0) + while (--iscsiqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, - &s->ofldrxq[ofldqidx].rspq); + &s->iscsirxq[iscsiqidx].rspq); while (--ethqidx >= 0) free_irq(adap->msix_info[--msi_index].vec, &s->ethrxq[ethqidx].rspq); @@ -853,8 +813,9 @@ static void free_msix_queue_irqs(struct adapter *adap) free_irq(adap->msix_info[1].vec, &s->fw_evtq); for_each_ethrxq(s, i) free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); - for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); + for_each_iscsirxq(s, i) + free_irq(adap->msix_info[msi_index++].vec, + &s->iscsirxq[i].rspq); for_each_rdmarxq(s, i) free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); for_each_rdmaciq(s, i) @@ -1093,8 +1054,8 @@ freeout: t4_free_sge_resources(adap); } } - j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ - for_each_ofldrxq(s, i) { + j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */ + for_each_iscsirxq(s, i) { err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], adap->port[i / j], s->fw_evtq.cntxt_id); @@ -1110,7 +1071,7 @@ freeout: t4_free_sge_resources(adap); msi_idx += nq; \ } while (0) - ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq); + ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq); ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); @@ -1181,16 +1142,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx) */ if (f->fs.newdmac || f->fs.newvlan) { /* allocate L2T entry for new filter */ - f->l2t = t4_l2t_alloc_switching(adapter->l2t); + f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, + f->fs.eport, f->fs.dmac); if (f->l2t == NULL) { kfree_skb(skb); - return -EAGAIN; - } - if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, - f->fs.eport, f->fs.dmac)) { - cxgb4_l2t_release(f->l2t); - f->l2t = NULL; - kfree_skb(skb); 
return -ENOMEM; } } @@ -1511,7 +1466,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) else stid = -1; } else { - stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); if (stid < 0) stid = -1; } @@ -1525,7 +1480,7 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) if (family == PF_INET) t->stids_in_use++; else - t->stids_in_use += 4; + t->stids_in_use += 2; } spin_unlock_bh(&t->stid_lock); return stid; @@ -1576,13 +1531,13 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else - bitmap_release_region(t->stid_bmap, stid, 2); + bitmap_release_region(t->stid_bmap, stid, 1); t->stid_tab[stid].data = NULL; if (stid < t->nstids) { if (family == PF_INET) t->stids_in_use--; else - t->stids_in_use -= 4; + t->stids_in_use -= 2; } else { t->sftids_in_use--; } @@ -2283,7 +2238,7 @@ static void disable_dbs(struct adapter *adap) for_each_ethrxq(&adap->sge, i) disable_txq_db(&adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) disable_txq_db(&adap->sge.ofldtxq[i].q); for_each_port(adap, i) disable_txq_db(&adap->sge.ctrlq[i].q); @@ -2295,7 +2250,7 @@ static void enable_dbs(struct adapter *adap) for_each_ethrxq(&adap->sge, i) enable_txq_db(adap, &adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) enable_txq_db(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) enable_txq_db(adap, &adap->sge.ctrlq[i].q); @@ -2365,7 +2320,7 @@ static void recover_all_queues(struct adapter *adap) for_each_ethrxq(&adap->sge, i) sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); - for_each_ofldrxq(&adap->sge, i) + for_each_iscsirxq(&adap->sge, i) sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); @@ -2449,10 +2404,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nrxq = adap->sge.rdmaqs; lli.nciq = adap->sge.rdmaciqs; } else if (uld == CXGB4_ULD_ISCSI) { - lli.rxq_ids = adap->sge.ofld_rxq; - lli.nrxq = adap->sge.ofldqsets; + lli.rxq_ids = adap->sge.iscsi_rxq; + lli.nrxq = adap->sge.iscsiqsets; } - lli.ntxq = adap->sge.ofldqsets; + lli.ntxq = adap->sge.iscsiqsets; lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; @@ -3146,16 +3101,6 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) if (ret < 0) return ret; - /* select capabilities we'll be using */ - if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { - if (!vf_acls) - c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); - else - c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); - } else if (vf_acls) { - dev_err(adap->pdev_dev, "virtualization ACLs not supported"); - return ret; - } c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); @@ -4348,11 +4293,11 @@ static void cfg_queues(struct adapter *adap) * capped by the number of available cores. 
*/ if (n10g) { - i = min_t(int, ARRAY_SIZE(s->ofldrxq), + i = min_t(int, ARRAY_SIZE(s->iscsirxq), num_online_cpus()); - s->ofldqsets = roundup(i, adap->params.nports); + s->iscsiqsets = roundup(i, adap->params.nports); } else - s->ofldqsets = adap->params.nports; + s->iscsiqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; /* Try and allow at least 1 CIQ per cpu rounding down @@ -4383,8 +4328,8 @@ static void cfg_queues(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) s->ofldtxq[i].q.size = 1024; - for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { - struct sge_ofld_rxq *r = &s->ofldrxq[i]; + for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) { + struct sge_ofld_rxq *r = &s->iscsirxq[i]; init_rspq(adap, &r->rspq, 5, 1, 1024, 64); r->rspq.uld = CXGB4_ULD_ISCSI; @@ -4465,7 +4410,7 @@ static int enable_msix(struct adapter *adap) want = s->max_ethqsets + EXTRA_VECS; if (is_offload(adap)) { - want += s->rdmaqs + s->rdmaciqs + s->ofldqsets; + want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets; /* need nchan for each possible ULD */ ofld_need = 3 * nchan; } @@ -4504,13 +4449,13 @@ static int enable_msix(struct adapter *adap) /* leftovers go to OFLD */ i = allocated - EXTRA_VECS - s->max_ethqsets - s->rdmaqs - s->rdmaciqs; - s->ofldqsets = (i / nchan) * nchan; /* round down */ + s->iscsiqsets = (i / nchan) * nchan; /* round down */ } for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", - allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs, + allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, s->rdmaciqs); kfree(entries); @@ -4538,6 +4483,79 @@ static int init_rss(struct adapter *adap) return 0; } +static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u32 lnkcap1, lnkcap2; + int err1, err2; + +#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP, + &lnkcap1); + err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2, + &lnkcap2); + if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *speed = PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + if (!err1) { + *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; + if (!lnkcap2) { /* pre-r3.0 */ + if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + } + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return err1 ? err1 : err2 ? err2 : -EINVAL; + return 0; +} + +static void cxgb4_check_pcie_caps(struct adapter *adap) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + +#define PCIE_SPEED_STR(speed) \ + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ + speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ + "Unknown") + + if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) { + dev_warn(adap->pdev_dev, + "Unable to determine PCIe device BW capabilities\n"); + return; + } + + if (pcie_get_minimum_link(adap->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + dev_warn(adap->pdev_dev, + "Unable to determine PCI Express bandwidth.\n"); + return; + } + + dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n", + PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); + dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n", + width, width_cap); + if (speed < speed_cap || width < width_cap) + dev_info(adap->pdev_dev, + "A slot with more lanes and/or higher speed is " + "suggested for optimal performance.\n"); +} + static void print_port_info(const struct net_device *dev) { char buf[80]; @@ -4565,10 +4583,10 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", + netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n", adap->params.vpd.id, CHELSIO_CHIP_RELEASE(adap->params.chip), buf, - is_offload(adap) ? "R" : "", adap->params.pci.width, spd, + is_offload(adap) ? "R" : "", (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); netdev_info(dev, "S/N: %s, P/N: %s\n", @@ -4787,8 +4805,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) - t4_write_reg(adapter, SGE_STAT_CFG_A, - STATSOURCE_T5_V(7) | STATMODE_V(0)); + t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | + (is_t5(adapter->params.chip) ? 
STATMODE_V(0) : + T6_STATMODE_V(0))); for_each_port(adapter, i) { struct net_device *netdev; @@ -4865,15 +4884,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } #if IS_ENABLED(CONFIG_IPV6) - adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, - adapter->clipt_end); - if (!adapter->clipt) { - /* We tolerate a lack of clip_table, giving up - * some functionality + if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && + (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { + /* CLIP functionality is not present in hardware, + * hence disable all offload features */ dev_warn(&pdev->dev, - "could not allocate Clip table, continuing\n"); + "CLIP not enabled in hardware, continuing\n"); adapter->params.offload = 0; + } else { + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, giving up + * some functionality + */ + dev_warn(&pdev->dev, + "could not allocate Clip table, continuing\n"); + adapter->params.offload = 0; + } } #endif if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { @@ -4904,6 +4933,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else if (msi > 0 && pci_enable_msi(pdev) == 0) adapter->flags |= USING_MSI; + /* check for PCI Express bandwidth capabiltites */ + cxgb4_check_pcie_caps(adapter); + err = init_rss(adapter); if (err) goto out_free_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index ac27898c6ab0..5b0f3ef348e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -66,7 +66,7 @@ struct l2t_data { static inline unsigned int vlan_prio(const struct l2t_entry *e) { - return e->vlan >> 13; + return e->vlan >> VLAN_PRIO_SHIFT; } static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) @@ -161,8 +161,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); - set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - t4_ofld_send(adap, skb); + t4_mgmt_tx(adap, skb); if (sync && e->state != L2T_STATE_SWITCHING) e->state = L2T_STATE_SYNC_WRITE; @@ -175,14 +174,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) */ static void send_pending(struct adapter *adap, struct l2t_entry *e) { - while (e->arpq_head) { - struct sk_buff *skb = e->arpq_head; + struct sk_buff *skb; - e->arpq_head = skb->next; - skb->next = NULL; + while ((skb = __skb_dequeue(&e->arpq)) != NULL) t4_ofld_send(adap, skb); - } - e->arpq_tail = NULL; } /* @@ -222,12 +217,7 @@ void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) */ static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) { - skb->next = NULL; - if (e->arpq_head) - e->arpq_tail->next = skb; - else - e->arpq_head = skb; - e->arpq_tail = skb; + __skb_queue_tail(&e->arpq, skb); } int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, @@ -259,7 +249,8 @@ again: if (e->state == L2T_STATE_RESOLVING && !neigh_event_send(e->neigh, NULL)) { spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + if (e->state == L2T_STATE_RESOLVING && + !skb_queue_empty(&e->arpq)) write_l2e(adap, e, 1); spin_unlock_bh(&e->lock); } @@ -305,12 +296,82 @@ found: return e; } -/* - * Called when an L2T entry has no more users. 
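The vlan_prio() change above replaces the magic shift of 13 with VLAN_PRIO_SHIFT from <linux/if_vlan.h>; the 802.1Q TCI keeps the 3-bit priority in bits 15:13 and the VLAN ID in bits 11:0. A small standalone illustration, with the constants restated locally so it compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* Same values as <linux/if_vlan.h>, repeated here for a self-contained demo. */
#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff

int main(void)
{
	uint16_t tci = (5 << VLAN_PRIO_SHIFT) | 100;	/* priority 5, VLAN ID 100 */

	printf("prio=%u vid=%u\n",
	       (unsigned)((tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT),
	       (unsigned)(tci & VLAN_VID_MASK));
	return 0;
}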
+static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan, + u8 port, u8 *dmac) +{ + struct l2t_entry *end, *e, **p; + struct l2t_entry *first_free = NULL; + + for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) { + if (atomic_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (e->state == L2T_STATE_SWITCHING) { + if (ether_addr_equal(e->dmac, dmac) && + (e->vlan == vlan) && (e->lport == port)) + goto exists; + } + } + } + + if (first_free) { + e = first_free; + goto found; + } + + return NULL; + +found: + /* The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + e->state = L2T_STATE_UNUSED; + +exists: + return e; +} + +/* Called when an L2T entry has no more users. The entry is left in the hash + * table since it is likely to be reused but we also bump nfree to indicate + * that the entry can be reallocated for a different neighbor. We also drop + * the existing neighbor reference in case the neighbor is going away and is + * waiting on our reference. + * + * Because entries can be reallocated to other neighbors once their ref count + * drops to 0 we need to take the entry's lock to avoid races with a new + * incarnation. */ +static void _t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + struct sk_buff *skb; + + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + while ((skb = __skb_dequeue(&e->arpq)) != NULL) + kfree_skb(skb); + } + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +/* Locked version of _t4_l2e_free */ static void t4_l2e_free(struct l2t_entry *e) { struct l2t_data *d; + struct sk_buff *skb; spin_lock_bh(&e->lock); if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ @@ -318,13 +379,8 @@ static void t4_l2e_free(struct l2t_entry *e) neigh_release(e->neigh); e->neigh = NULL; } - while (e->arpq_head) { - struct sk_buff *skb = e->arpq_head; - - e->arpq_head = skb->next; + while ((skb = __skb_dequeue(&e->arpq)) != NULL) kfree_skb(skb); - } - e->arpq_tail = NULL; } spin_unlock_bh(&e->lock); @@ -457,18 +513,19 @@ EXPORT_SYMBOL(cxgb4_select_ntuple); * on the arpq head. If a packet specifies a failure handler it is invoked, * otherwise the packet is sent to the device. 
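find_or_alloc_l2e() above does a single pass that remembers the first unused slot while preferring a live switching entry whose dmac/vlan/port already match, so t4_l2t_alloc_switching() can simply take another reference instead of burning a new entry. A much-simplified userspace sketch of that scan; the real code also tracks the entry state and unhooks a recycled entry from its hash chain, which is omitted here:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Simplified stand-in for struct l2t_entry, for illustration only. */
struct entry {
	int refcnt;
	uint16_t vlan;
	uint8_t port;
	uint8_t dmac[6];
};

static struct entry *find_or_alloc(struct entry *tab, int n, uint16_t vlan,
				   uint8_t port, const uint8_t *dmac)
{
	struct entry *first_free = NULL;

	for (int i = 0; i < n; i++) {
		if (tab[i].refcnt == 0) {
			if (!first_free)
				first_free = &tab[i];
		} else if (tab[i].vlan == vlan && tab[i].port == port &&
			   !memcmp(tab[i].dmac, dmac, 6)) {
			return &tab[i];		/* reuse the live entry */
		}
	}
	return first_free;			/* NULL if the table is full */
}

int main(void)
{
	struct entry tab[4] = { { .refcnt = 1, .vlan = 10, .port = 0,
				  .dmac = { 1, 2, 3, 4, 5, 6 } } };
	uint8_t mac[6] = { 1, 2, 3, 4, 5, 6 };

	printf("match -> slot %ld\n", (long)(find_or_alloc(tab, 4, 10, 0, mac) - tab));
	printf("new   -> slot %ld\n", (long)(find_or_alloc(tab, 4, 20, 1, mac) - tab));
	return 0;
}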
*/ -static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) { - while (arpq) { - struct sk_buff *skb = arpq; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - arpq = skb->next; - skb->next = NULL; + spin_unlock(&e->lock); if (cb->arp_err_handler) cb->arp_err_handler(cb->handle, skb); else t4_ofld_send(adap, skb); + spin_lock(&e->lock); } } @@ -479,7 +536,7 @@ static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { struct l2t_entry *e; - struct sk_buff *arpq = NULL; + struct sk_buff_head *arpq = NULL; struct l2t_data *d = adap->l2t; int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; @@ -506,10 +563,9 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) if (e->state == L2T_STATE_RESOLVING) { if (neigh->nud_state & NUD_FAILED) { - arpq = e->arpq_head; - e->arpq_head = e->arpq_tail = NULL; + arpq = &e->arpq; } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && - e->arpq_head) { + !skb_queue_empty(&e->arpq)) { write_l2e(adap, e, 1); } } else { @@ -519,43 +575,66 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - spin_unlock_bh(&e->lock); - if (arpq) - handle_failed_resolution(adap, arpq); + handle_failed_resolution(adap, e); + spin_unlock_bh(&e->lock); } /* Allocate an L2T entry for use by a switching rule. Such need to be * explicitly freed and while busy they are not on any hash chain, so normal * address resolution updates do not see them. */ -struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, + u8 port, u8 *eth_addr) { + struct l2t_data *d = adap->l2t; struct l2t_entry *e; + int ret; write_lock_bh(&d->lock); - e = alloc_l2e(d); + e = find_or_alloc_l2e(d, vlan, port, eth_addr); if (e) { spin_lock(&e->lock); /* avoid race with t4_l2t_free */ - e->state = L2T_STATE_SWITCHING; - atomic_set(&e->refcnt, 1); + if (!atomic_read(&e->refcnt)) { + e->state = L2T_STATE_SWITCHING; + e->vlan = vlan; + e->lport = port; + ether_addr_copy(e->dmac, eth_addr); + atomic_set(&e->refcnt, 1); + ret = write_l2e(adap, e, 0); + if (ret < 0) { + _t4_l2e_free(e); + spin_unlock(&e->lock); + write_unlock_bh(&d->lock); + return NULL; + } + } else { + atomic_inc(&e->refcnt); + } + spin_unlock(&e->lock); } write_unlock_bh(&d->lock); return e; } -/* Sets/updates the contents of a switching L2T entry that has been allocated - * with an earlier call to @t4_l2t_alloc_switching. 
+/** + * @dev: net_device pointer + * @vlan: VLAN Id + * @port: Associated port + * @dmac: Destination MAC address to add to L2T + * Returns pointer to the allocated l2t entry + * + * Allocates an L2T entry for use by switching rule of a filter */ -int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, - u8 port, u8 *eth_addr) +struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, + u8 port, u8 *dmac) { - e->vlan = vlan; - e->lport = port; - memcpy(e->dmac, eth_addr, ETH_ALEN); - return write_l2e(adap, e, 0); + struct adapter *adap = netdev2adap(dev); + + return t4_l2t_alloc_switching(adap, vlan, port, dmac); } +EXPORT_SYMBOL(cxgb4_l2t_alloc_switching); struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) { @@ -585,6 +664,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) d->l2tab[i].state = L2T_STATE_UNUSED; spin_lock_init(&d->l2tab[i].lock); atomic_set(&d->l2tab[i].refcnt, 0); + skb_queue_head_init(&d->l2tab[i].arpq); } return d; } @@ -619,7 +699,8 @@ static char l2e_state(const struct l2t_entry *e) case L2T_STATE_VALID: return 'V'; case L2T_STATE_STALE: return 'S'; case L2T_STATE_SYNC_WRITE: return 'W'; - case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_RESOLVING: + return skb_queue_empty(&e->arpq) ? 'R' : 'A'; case L2T_STATE_SWITCHING: return 'X'; default: return 'U'; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index b38dc526aad5..4e2d47ac102b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -76,8 +76,7 @@ struct l2t_entry { struct neighbour *neigh; /* associated neighbour */ struct l2t_entry *first; /* start of hash chain */ struct l2t_entry *next; /* next l2t_entry on chain */ - struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ - struct sk_buff *arpq_tail; + struct sk_buff_head arpq; /* packet queue awaiting resolution */ spinlock_t lock; atomic_t refcnt; /* entry reference count */ u16 hash; /* hash bucket the entry is on */ @@ -114,10 +113,11 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, unsigned int priority); u64 cxgb4_select_ntuple(struct net_device *dev, const struct l2t_entry *l2t); +struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan, + u8 port, u8 *dmac); void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); -struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); -int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, - u8 port, u8 *eth_addr); +struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, + u8 port, u8 *dmac); struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end); void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b7b93e7a643d..b4eb4680a27c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -406,7 +406,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? 
hw_cidx + q->size : hw_cidx; } @@ -613,6 +613,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { __free_pages(pg, s->fl_pg_order); + q->mapping_err++; goto out; /* do not try small pages for this error */ } mapping |= RX_LARGE_PG_BUF; @@ -642,6 +643,7 @@ alloc_small_pages: PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { put_page(pg); + q->mapping_err++; goto out; } *d++ = cpu_to_be64(mapping); @@ -663,6 +665,7 @@ out: cred = q->avail - cred; if (unlikely(fl_starving(adap, q))) { smp_wmb(); + q->low++; set_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.starving_fl); } @@ -1029,6 +1032,30 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, *p = 0; } +static void *inline_tx_skb_header(const struct sk_buff *skb, + const struct sge_txq *q, void *pos, + int length) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(length <= left)) { + memcpy(pos, skb->data, length); + pos += length; + } else { + memcpy(pos, skb->data, left); + memcpy(q->desc, skb->data + left, length - left); + pos = (void *)q->desc + (length - left); + } + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) { + *p = 0; + return p + 1; + } + return p; +} + /* * Figure out what HW csum a packet wants and return the appropriate control * bits. @@ -1320,7 +1347,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) @@ -1542,24 +1569,50 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) } /** - * service_ofldq - restart a suspended offload queue + * service_ofldq - service/restart a suspended offload queue * @q: the offload queue * - * Services an offload Tx queue by moving packets from its packet queue - * to the HW Tx ring. The function starts and ends with the queue locked. + * Services an offload Tx queue by moving packets from its Pending Send + * Queue to the Hardware TX ring. The function starts and ends with the + * Send Queue locked, but drops the lock while putting the skb at the + * head of the Send Queue onto the Hardware TX Ring. Dropping the lock + * allows more skbs to be added to the Send Queue by other threads. + * The packet being processed at the head of the Pending Send Queue is + * left on the queue in case we experience DMA Mapping errors, etc. + * and need to give up and restart later. + * + * service_ofldq() can be thought of as a task which opportunistically + * uses other threads execution contexts. We use the Offload Queue + * boolean "service_ofldq_running" to make sure that only one instance + * is ever running at a time ... */ static void service_ofldq(struct sge_ofld_txq *q) { - u64 *pos; + u64 *pos, *before, *end; int credits; struct sk_buff *skb; + struct sge_txq *txq; + unsigned int left; unsigned int written = 0; unsigned int flits, ndesc; + /* If another thread is currently in service_ofldq() processing the + * Pending Send Queue then there's nothing to do. Otherwise, flag + * that we're doing the work and continue. Examining/modifying + * the Offload Queue boolean "service_ofldq_running" must be done + * while holding the Pending Send Queue Lock. 
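inline_tx_skb_header() above copies the WR header into the TX descriptor ring, wrapping to the start of the ring when the header straddles the end, and then pads the result out to a 16-byte multiple. A standalone sketch of just the wrap-around copy, with an invented fixed-size ring and the padding step left out:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 64	/* made-up ring size for the demo */

/* Copy len bytes into a circular buffer at pos, wrapping if needed;
 * returns the new write position.
 */
static size_t ring_copy(uint8_t *ring, size_t pos, const void *src, size_t len)
{
	size_t left = RING_SIZE - pos;

	if (len <= left) {
		memcpy(ring + pos, src, len);
	} else {
		memcpy(ring + pos, src, left);
		memcpy(ring, (const uint8_t *)src + left, len - left);
	}
	return (pos + len) % RING_SIZE;
}

int main(void)
{
	uint8_t ring[RING_SIZE] = { 0 };
	const char hdr[] = "0123456789abcdef0123";	/* 20-byte header */
	size_t pos = ring_copy(ring, 56, hdr, sizeof(hdr) - 1);

	/* 8 bytes land at offsets 56..63, the remaining 12 wrap to offset 0 */
	printf("new pos = %zu, ring[0..3] = %.4s\n", pos, (char *)ring);
	return 0;
}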
+ */ + if (q->service_ofldq_running) + return; + q->service_ofldq_running = true; + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { - /* - * We drop the lock but leave skb on sendq, thus retaining - * exclusive access to the state of the queue. + /* We drop the lock while we're working with the skb at the + * head of the Pending Send Queue. This allows more skbs to + * be added to the Pending Send Queue while we're working on + * this one. We don't need to lock to guard the TX Ring + * updates because only one thread of execution is ever + * allowed into service_ofldq() at a time. */ spin_unlock(&q->sendq.lock); @@ -1583,9 +1636,32 @@ static void service_ofldq(struct sge_ofld_txq *q) } else { int last_desc, hdr_len = skb_transport_offset(skb); - memcpy(pos, skb->data, hdr_len); - write_sgl(skb, &q->q, (void *)pos + hdr_len, - pos + flits, hdr_len, + /* The WR headers may not fit within one descriptor. + * So we need to deal with wrap-around here. + */ + before = (u64 *)pos; + end = (u64 *)pos + flits; + txq = &q->q; + pos = (void *)inline_tx_skb_header(skb, &q->q, + (void *)pos, + hdr_len); + if (before > (u64 *)pos) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + } + + /* If current position is already at the end of the + * ofld queue, reset the current to point to + * start of the queue and update the end ptr as well. + */ + if (pos == (u64 *)txq->stat) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + pos = (void *)txq->desc; + } + + write_sgl(skb, &q->q, (void *)pos, + end, hdr_len, (dma_addr_t *)skb->head); #ifdef CONFIG_NEED_DMA_MAP_STATE skb->dev = q->adap->port[0]; @@ -1604,6 +1680,11 @@ static void service_ofldq(struct sge_ofld_txq *q) written = 0; } + /* Reacquire the Pending Send Queue Lock so we can unlink the + * skb we've just successfully transferred to the TX Ring and + * loop for the next skb which may be at the head of the + * Pending Send Queue. + */ spin_lock(&q->sendq.lock); __skb_unlink(skb, &q->sendq); if (is_ofld_imm(skb)) @@ -1611,6 +1692,11 @@ static void service_ofldq(struct sge_ofld_txq *q) } if (likely(written)) ring_tx_db(q->adap, &q->q, written); + + /*Indicate that no thread is processing the Pending Send Queue + * currently. + */ + q->service_ofldq_running = false; } /** @@ -1624,9 +1710,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) { skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ spin_lock(&q->sendq.lock); + + /* Queue the new skb onto the Offload Queue's Pending Send Queue. If + * that results in this new skb being the only one on the queue, start + * servicing it. If there are other skbs already on the list, then + * either the queue is currently being processed or it's been stopped + * for some reason and it'll be restarted at a later time. Restart + * paths are triggered by events like experiencing a DMA Mapping Error + * or filling the Hardware TX Ring. 
+ */ __skb_queue_tail(&q->sendq, skb); if (q->sendq.qlen == 1) service_ofldq(q); + spin_unlock(&q->sendq.lock); return NET_XMIT_SUCCESS; } @@ -1864,7 +1960,6 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxq->rspq.idx); - skb_mark_napi_id(skb, &rxq->rspq.napi); pi = netdev_priv(skb->dev); if (pi->rxtstamp) cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb), @@ -2193,7 +2288,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) if (likely(work_done < budget)) { int timer_index; - napi_complete(napi); + napi_complete_done(napi, work_done); timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); if (q->adaptive_rx) { @@ -2460,7 +2555,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->size = roundup(iq->size, 16); iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, - &iq->phys_addr, NULL, 0, NUMA_NO_NODE); + &iq->phys_addr, NULL, 0, + dev_to_node(adap->pdev_dev)); if (!iq->desc) return -ENOMEM; @@ -2500,7 +2596,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->size = roundup(fl->size, 8); fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), &fl->addr, - &fl->sdesc, s->stat_len, NUMA_NO_NODE); + &fl->sdesc, s->stat_len, + dev_to_node(adap->pdev_dev)); if (!fl->desc) goto fl_nomem; @@ -2528,7 +2625,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto err; netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); - napi_hash_add(&iq->napi); iq->cur_desc = iq->desc; iq->cidx = 0; iq->gen = 1; @@ -2574,8 +2670,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * simple (and hopefully less wrong). */ if (!is_t4(adap->params.chip) && cong >= 0) { - u32 param, val; + u32 param, val, ch_map = 0; int i; + u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | @@ -2587,9 +2684,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); for (i = 0; i < 4; i++) { if (cong & (1 << i)) - val |= - CONMCTXT_CNGCHMAP_V(1 << (i << 2)); + ch_map |= 1 << (i << cng_ch_bits_log); } + val |= CONMCTXT_CNGCHMAP_V(ch_map); } ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); @@ -2884,7 +2981,7 @@ void t4_free_sge_resources(struct adapter *adap) } /* clean up RDMA and iSCSI Rx queues */ - t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); + t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq); t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); @@ -3077,8 +3174,7 @@ static int t4_sge_init_soft(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_control2, sge_conm_ctrl; - unsigned int ingpadboundary, ingpackboundary; + u32 sge_control, sge_conm_ctrl; int ret, egress_threshold; /* @@ -3089,35 +3185,7 @@ int t4_sge_init(struct adapter *adap) s->pktshift = PKTSHIFT_G(sge_control); s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; - /* T4 uses a single control field to specify both the PCIe Padding and - * Packing Boundary. T5 introduced the ability to specify these - * separately. 
The actual Ingress Packet Data alignment boundary - * within Packed Buffer Mode is the maximum of these two - * specifications. (Note that it makes no real practical sense to - * have the Pading Boudary be larger than the Packing Boundary but you - * could set the chip up that way and, in fact, legacy T4 code would - * end doing this because it would initialize the Padding Boundary and - * leave the Packing Boundary initialized to 0 (16 bytes).) - */ - ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + - INGPADBOUNDARY_SHIFT_X); - if (is_t4(adap->params.chip)) { - s->fl_align = ingpadboundary; - } else { - /* T5 has a different interpretation of one of the PCIe Packing - * Boundary values. - */ - sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); - ingpackboundary = INGPACKBOUNDARY_G(sge_control2); - if (ingpackboundary == INGPACKBOUNDARY_16B_X) - ingpackboundary = 16; - else - ingpackboundary = 1 << (ingpackboundary + - INGPACKBOUNDARY_SHIFT_X); - - s->fl_align = max(ingpadboundary, ingpackboundary); - } - + s->fl_align = t4_fl_pkt_align(adap); ret = t4_sge_init_soft(adap); if (ret < 0) return ret; @@ -3135,10 +3203,21 @@ int t4_sge_init(struct adapter *adap) * buffers. */ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); - if (is_t4(adap->params.chip)) + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T4: egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); - else + break; + case CHELSIO_T5: egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); + break; + case CHELSIO_T6: + egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); + break; + default: + dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; + } s->fl_starve_thres = 2*egress_threshold + 1; t4_idma_monitor_init(adap, &s->idma_monitor); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cf61a5869c6e..636b4691f252 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1942,8 +1942,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1254, - 0x1280, 0x133c, + 0x11fc, 0x1258, + 0x1280, 0x12d4, + 0x12d9, 0x12d9, + 0x12de, 0x12de, + 0x12e3, 0x12e3, + 0x12e8, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, @@ -1973,7 +1977,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5e50, 0x5e94, 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, - 0x5ec8, 0x5ecc, + 0x5ec8, 0x5ed0, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, @@ -2048,7 +2052,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, - 0x19238, 0x192b0, + 0x19238, 0x19290, + 0x192a4, 0x192b0, 0x192bc, 0x192bc, 0x19348, 0x1934c, 0x193f8, 0x19418, @@ -2442,7 +2447,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, - 0x41304, 0x413c8, + 0x41304, 0x413b8, + 0x413c0, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, @@ -5254,7 +5260,7 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) int i; u32 data[2]; - for (i = 0; i < PM_NSTATS; i++) { + for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1); cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A); if (is_t4(adap->params.chip)) { @@ -5281,7 +5287,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) int 
i; u32 data[2]; - for (i = 0; i < PM_NSTATS; i++) { + for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1); cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A); if (is_t4(adap->params.chip)) { @@ -5310,7 +5316,14 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) if (n == 0) return idx == 0 ? 0xf : 0; - if (n == 1) + /* In T6 (which is a 2 port card), + * port 0 is mapped to channel 0 and port 1 is mapped to channel 1. + * For 2 port T4/T5 adapter, + * port 0 is mapped to channel 0 and 1, + * port 1 is mapped to channel 2 and 3. + */ + if ((n == 1) && + (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } @@ -5689,6 +5702,39 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) "IDMA_FL_SEND_PADDING", "IDMA_FL_SEND_COMPLETION_TO_IMSG", }; + static const char * const t6_decode[] = { + "IDMA_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_DROP_SEND_INC", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + "IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + "IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + }; static const u32 sge_regs[] = { SGE_DEBUG_DATA_LOW_INDEX_2_A, SGE_DEBUG_DATA_LOW_INDEX_3_A, @@ -5697,6 +5743,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) const char **sge_idma_decode; int sge_idma_decode_nstates; int i; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + /* Select the right set of decode strings to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T4: + sge_idma_decode = (const char **)t4_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); + break; + + case CHELSIO_T5: + sge_idma_decode = (const char **)t5_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); + break; + + case CHELSIO_T6: + sge_idma_decode = (const char **)t6_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t6_decode); + break; + + default: + dev_err(adapter->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return; + } if (is_t4(adapter->params.chip)) { sge_idma_decode = (const char **)t4_decode; @@ -6097,6 +6169,59 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, } /** + * t4_fl_pkt_align - return the fl packet alignment + * @adap: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + * + */ +int t4_fl_pkt_align(struct adapter *adap) +{ + u32 sge_control, sge_control2; + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + sge_control = t4_read_reg(adap, SGE_CONTROL_A); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. 
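t4_fl_pkt_align() boils down to "alignment = max(ingress padding boundary, ingress packing boundary)", where both boundaries come out of log2-encoded register fields and the padding shift base is 5 on T4/T5 but 3 on T6. A rough, arithmetic-only sketch; the helper name is invented and the T4 case (which uses only the padding boundary) is simplified away:

#include <stdio.h>

/* Illustrative arithmetic only: pad_field/pack_field are the raw register
 * fields, pad_shift is 5 for T4/T5 or 3 for T6, and a packing field of 0
 * means 16 bytes (INGPACKBOUNDARY_16B_X).
 */
static unsigned int fl_pkt_align(unsigned int pad_field, unsigned int pad_shift,
				 unsigned int pack_field)
{
	unsigned int ingpad = 1u << (pad_field + pad_shift);
	unsigned int ingpack = pack_field ? 1u << (pack_field + 5) : 16;

	return ingpad > ingpack ? ingpad : ingpack;
}

int main(void)
{
	/* e.g. pad field 0 on T6 (8 bytes) with a 64-byte packing boundary */
	printf("fl_align = %u\n", fl_pkt_align(0, 3, 1));
	return 0;
}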
T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adap->params.chip)) { + /* T5 has a weird interpretation of one of the PCIe Packing + * Boundary values. No idea why ... + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +/** * t4_fixup_host_params - fix up host-dependent parameters * @adap: the adapter * @page_size: the host's Base Page Size @@ -6114,6 +6239,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, unsigned int stat_len = cache_line_size > 64 ? 128 : 64; unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; unsigned int fl_align_log = fls(fl_align) - 1; + unsigned int ingpad; t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, HOSTPAGESIZEPF0_V(sge_hps) | @@ -6161,10 +6287,16 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, fl_align = 64; fl_align_log = 6; } + + if (is_t5(adap->params.chip)) + ingpad = INGPCIEBOUNDARY_32B_X; + else + ingpad = T6_INGPADBOUNDARY_32B_X; + t4_set_reg_field(adap, SGE_CONTROL_A, INGPADBOUNDARY_V(INGPADBOUNDARY_M) | EGRSTATUSPAGESIZE_F, - INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) | + INGPADBOUNDARY_V(ingpad) | EGRSTATUSPAGESIZE_V(stat_len != 64)); t4_set_reg_field(adap, SGE_CONTROL2_A, INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), @@ -7060,7 +7192,12 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 128; adapter->params.arch.nchan = NCHAN; + adapter->params.arch.pm_stats_cnt = PM_NSTATS; adapter->params.arch.vfcount = 128; + /* Congestion map is for 4 channels so that + * MPS can have 4 priority per port. 
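The congestion channel map built in t4_sge_alloc_rxq() sets one bit per congested channel, and cng_ch_bits_log (2 for T4/T5, 3 for T6 as set here) decides how far apart the per-channel bit groups sit. A standalone rendering of that loop:

#include <stdio.h>

/* Mirrors the ch_map construction from t4_sge_alloc_rxq(): each congested
 * channel i contributes bit (i << cng_ch_bits_log).
 */
static unsigned int build_ch_map(unsigned int cong, unsigned int cng_ch_bits_log)
{
	unsigned int ch_map = 0;

	for (int i = 0; i < 4; i++)
		if (cong & (1u << i))
			ch_map |= 1u << (i << cng_ch_bits_log);
	return ch_map;
}

int main(void)
{
	/* channels 0 and 2 congested */
	printf("T4/T5 map = 0x%x\n", build_ch_map(0x5, 2));	/* 0x101 */
	printf("T6    map = 0x%x\n", build_ch_map(0x5, 3));	/* 0x10001 */
	return 0;
}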
+ */ + adapter->params.arch.cng_ch_bits_log = 2; break; case CHELSIO_T5: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); @@ -7069,7 +7206,9 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_T5_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 128; adapter->params.arch.nchan = NCHAN; + adapter->params.arch.pm_stats_cnt = PM_NSTATS; adapter->params.arch.vfcount = 128; + adapter->params.arch.cng_ch_bits_log = 2; break; case CHELSIO_T6: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); @@ -7078,7 +7217,12 @@ int t4_prep_adapter(struct adapter *adapter) NUM_MPS_T5_CLS_SRAM_L_INSTANCES; adapter->params.arch.mps_rplc_size = 256; adapter->params.arch.nchan = 2; + adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS; adapter->params.arch.vfcount = 256; + /* Congestion map will be for 2 channels so that + * MPS can have 8 priority per port. + */ + adapter->params.arch.cng_ch_bits_log = 3; break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 13708fde1668..2fc60e83a7a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -48,6 +48,7 @@ enum { NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ PM_NSTATS = 5, /* # of PM stats */ + T6_PM_NSTATS = 7, /* # of PM stats in T6 */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 03ed00c49823..a8dda635456d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -162,6 +162,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ + CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index fc3044c8ac1c..9fea255c7e87 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -492,6 +492,9 @@ #define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S) #define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M) +#define T6_STATMODE_S 0 +#define T6_STATMODE_V(x) ((x) << T6_STATMODE_S) + #define SGE_DBFIFO_STATUS2_A 0x1118 #define HP_INT_THRESH_T5_S 10 @@ -2395,6 +2398,30 @@ #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 +#define VIDL_S 16 +#define VIDL_M 0xffffU +#define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M) + +#define DATALKPTYPE_S 10 +#define DATALKPTYPE_M 0x3U +#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M) + +#define DATAPORTNUM_S 12 +#define DATAPORTNUM_M 0xfU +#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M) + +#define DATADIPHIT_S 8 +#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S) +#define DATADIPHIT_F DATADIPHIT_V(1U) + +#define DATAVIDH2_S 7 +#define DATAVIDH2_V(x) ((x) << DATAVIDH2_S) +#define DATAVIDH2_F DATAVIDH2_V(1U) + +#define DATAVIDH1_S 0 +#define DATAVIDH1_M 0x7fU +#define DATAVIDH1_G(x) (((x) >> 
DATAVIDH1_S) & DATAVIDH1_M) + #define USED_S 16 #define USED_M 0x7ffU #define USED_G(x) (((x) >> USED_S) & USED_M) @@ -2802,6 +2829,10 @@ #define HASHEN_V(x) ((x) << HASHEN_S) #define HASHEN_F HASHEN_V(1U) +#define ASLIPCOMPEN_S 17 +#define ASLIPCOMPEN_V(x) ((x) << ASLIPCOMPEN_S) +#define ASLIPCOMPEN_F ASLIPCOMPEN_V(1U) + #define REQQPARERR_S 16 #define REQQPARERR_V(x) ((x) << REQQPARERR_S) #define REQQPARERR_F REQQPARERR_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index 7bdee3bf75ec..a5231fa771db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -53,6 +53,9 @@ #define INGPADBOUNDARY_SHIFT_X 5 +#define T6_INGPADBOUNDARY_SHIFT_X 3 +#define T6_INGPADBOUNDARY_32B_X 2 + /* CONTROL2 register */ #define INGPACKBOUNDARY_SHIFT_X 5 #define INGPACKBOUNDARY_16B_X 0 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fa3786a9d30e..6528231d8a59 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2607,7 +2607,7 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; - unsigned int ingpadboundary, ingpackboundary; + unsigned int ingpadboundary, ingpackboundary, ingpad_shift; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2642,9 +2642,16 @@ int t4vf_sge_init(struct adapter *adapter) * could set the chip up that way and, in fact, legacy T4 code would * end doing this because it would initialize the Padding Boundary and * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) + - INGPADBOUNDARY_SHIFT_X); + ingpad_shift); if (is_t4(adapter->params.chip)) { s->fl_align = ingpadboundary; } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h index b516b12b1884..f859db3d254c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h @@ -54,6 +54,7 @@ #define T4VF_MPS_BASE_ADDR 0x0100 #define T4VF_PL_BASE_ADDR 0x0200 #define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T6VF_MBDATA_BASE_ADDR 0x0280 #define T4VF_CIM_BASE_ADDR 0x0300 #define T4VF_REGMAP_START 0x0000 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 63dd5fdac5b9..b6fa74aafe47 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -120,12 +120,19 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 1, 1, 3, 5, 10, 10, 20, 50, 100 }; - u32 v; + u32 v, mbox_data; int i, ms, delay_idx; const __be64 *p; - u32 mbox_data = T4VF_MBDATA_BASE_ADDR; u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + /* In T6, mailbox size is changed to 128 bytes to avoid + * invalidating the entire prefetch buffer. 
+ */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + mbox_data = T4VF_MBDATA_BASE_ADDR; + else + mbox_data = T6VF_MBDATA_BASE_ADDR; + /* * Commands must be multiples of 16 bytes in length and may not be * larger than the size of the Mailbox Data register array. diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b36643ef0593..b2182d3ba3cc 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2458,13 +2458,11 @@ static int enic_dev_init(struct enic *enic) switch (vnic_dev_get_intr_mode(enic->vdev)) { default: netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); - napi_hash_add(&enic->napi[0]); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->rq_count; i++) { netif_napi_add(netdev, &enic->napi[i], enic_poll_msix_rq, NAPI_POLL_WEIGHT); - napi_hash_add(&enic->napi[i]); } for (i = 0; i < enic->wq_count; i++) netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 8966f3159bb2..3acde3b9b767 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev) static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; -static int __init de4x5_eisa_probe (struct device *gendev) +static int de4x5_eisa_probe(struct device *gendev) { struct eisa_device *edev; u_long iobase; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index ccca4799c27b..f92b6d948398 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -70,7 +70,6 @@ static const int multicast_filter_limit = 0x40; static int rio_open (struct net_device *dev); static void rio_timer (unsigned long data); static void rio_tx_timeout (struct net_device *dev); -static void alloc_list (struct net_device *dev); static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); static irqreturn_t rio_interrupt (int irq, void *dev_instance); static void rio_free_tx (struct net_device *dev, int irq); @@ -253,19 +252,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_unmap_rx; - if (np->chip_id == CHIP_IP1000A && - (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { - /* PHY magic taken from ipg driver, undocumented registers */ - mii_write(dev, np->phy_addr, 31, 0x0001); - mii_write(dev, np->phy_addr, 27, 0x01e0); - mii_write(dev, np->phy_addr, 31, 0x0002); - mii_write(dev, np->phy_addr, 27, 0xeb8e); - mii_write(dev, np->phy_addr, 31, 0x0000); - mii_write(dev, np->phy_addr, 30, 0x005e); - /* advertise 1000BASE-T half & full duplex, prefer MASTER */ - mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); - } - /* Fiber device? */ np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 
1 : 0; np->link_status = 0; @@ -275,13 +261,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) if (np->an_enable == 2) { np->an_enable = 1; } - mii_set_media_pcs (dev); } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; - mii_set_media (dev); } err = register_netdev (dev); @@ -446,19 +430,106 @@ static void rio_set_led_mode(struct net_device *dev) dw32(ASICCtrl, mode); } -static int -rio_open (struct net_device *dev) +static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) +{ + return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); +} + +static void free_list(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct sk_buff *skb; + int i; + + /* Free all the skbuffs in the queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = np->rx_skbuff[i]; + if (skb) { + pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]), + skb->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + np->rx_skbuff[i] = NULL; + } + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = np->tx_skbuff[i]; + if (skb) { + pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + np->tx_skbuff[i] = NULL; + } + } +} + +static void rio_reset_ring(struct netdev_private *np) +{ + int i; + + np->cur_rx = 0; + np->cur_tx = 0; + np->old_rx = 0; + np->old_tx = 0; + + for (i = 0; i < TX_RING_SIZE; i++) + np->tx_ring[i].status = cpu_to_le64(TFDDone); + + for (i = 0; i < RX_RING_SIZE; i++) + np->rx_ring[i].status = 0; +} + + /* allocate and initialize Tx and Rx descriptors */ +static int alloc_list(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + rio_reset_ring(np); + np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); + + /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma + + ((i + 1) % TX_RING_SIZE) * + sizeof(struct netdev_desc)); + } + + /* Initialize Rx descriptors & allocate buffers */ + for (i = 0; i < RX_RING_SIZE; i++) { + /* Allocated fixed size of skbuff */ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); + np->rx_skbuff[i] = skb; + if (!skb) { + free_list(dev); + return -ENOMEM; + } + + np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + + ((i + 1) % RX_RING_SIZE) * + sizeof(struct netdev_desc)); + /* Rubicon now supports 40 bits of addressing space. 
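The dl2k descriptors pack the DMA address into the low 48 bits of fraginfo and the buffer length into the top 16 bits, which is why the new desc_to_dma() helper masks with DMA_BIT_MASK(48). A standalone sketch of the packing; the cpu_to_le64 conversion done by the driver is left out:

#include <stdint.h>
#include <stdio.h>

#define DMA_MASK_48 ((1ULL << 48) - 1)

/* Pack a bus address (low 48 bits) and buffer length (high 16 bits) into one
 * 64-bit fraginfo word, as the dl2k descriptors do.
 */
static uint64_t pack_fraginfo(uint64_t dma_addr, uint16_t len)
{
	return (dma_addr & DMA_MASK_48) | ((uint64_t)len << 48);
}

int main(void)
{
	uint64_t fraginfo = pack_fraginfo(0x12345678abcdULL, 1536);

	printf("addr = 0x%llx, len = %llu\n",
	       (unsigned long long)(fraginfo & DMA_MASK_48),
	       (unsigned long long)(fraginfo >> 48));
	return 0;
}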
*/ + np->rx_ring[i].fraginfo = + cpu_to_le64(pci_map_single( + np->pdev, skb->data, np->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); + } + + return 0; +} + +static void rio_hw_init(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; - const int irq = np->pdev->irq; int i; u16 macctrl; - i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); - if (i) - return i; - /* Reset all logic functions */ dw16(ASICCtrl + 2, GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); @@ -469,11 +540,31 @@ rio_open (struct net_device *dev) /* DebugCtrl bit 4, 5, 9 must set */ dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); + if (np->chip_id == CHIP_IP1000A && + (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { + /* PHY magic taken from ipg driver, undocumented registers */ + mii_write(dev, np->phy_addr, 31, 0x0001); + mii_write(dev, np->phy_addr, 27, 0x01e0); + mii_write(dev, np->phy_addr, 31, 0x0002); + mii_write(dev, np->phy_addr, 27, 0xeb8e); + mii_write(dev, np->phy_addr, 31, 0x0000); + mii_write(dev, np->phy_addr, 30, 0x005e); + /* advertise 1000BASE-T half & full duplex, prefer MASTER */ + mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); + } + + if (np->phy_media) + mii_set_media_pcs(dev); + else + mii_set_media(dev); + /* Jumbo frame */ if (np->jumbo != 0) dw16(MaxFrameSize, MAX_JUMBO+14); - alloc_list (dev); + /* Set RFDListPtr */ + dw32(RFDListPtr0, np->rx_ring_dma); + dw32(RFDListPtr1, 0); /* Set station address */ /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works @@ -509,10 +600,6 @@ rio_open (struct net_device *dev) dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } - setup_timer(&np->timer, rio_timer, (unsigned long)dev); - np->timer.expires = jiffies + 1*HZ; - add_timer (&np->timer); - /* Start Tx/Rx */ dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); @@ -522,6 +609,42 @@ rio_open (struct net_device *dev) macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; dw16(MACCtrl, macctrl); +} + +static void rio_hw_stop(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + /* Disable interrupts */ + dw16(IntEnable, 0); + + /* Stop Tx and Rx logics */ + dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); +} + +static int rio_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + const int irq = np->pdev->irq; + int i; + + i = alloc_list(dev); + if (i) + return i; + + rio_hw_init(dev); + + i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); + if (i) { + rio_hw_stop(dev); + free_list(dev); + return i; + } + + setup_timer(&np->timer, rio_timer, (unsigned long)dev); + np->timer.expires = jiffies + 1 * HZ; + add_timer(&np->timer); netif_start_queue (dev); @@ -586,60 +709,6 @@ rio_tx_timeout (struct net_device *dev) dev->trans_start = jiffies; /* prevent tx timeout */ } - /* allocate and initialize Tx and Rx descriptors */ -static void -alloc_list (struct net_device *dev) -{ - struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->ioaddr; - int i; - - np->cur_rx = np->cur_tx = 0; - np->old_rx = np->old_tx = 0; - np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); - - /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). 
*/ - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_skbuff[i] = NULL; - np->tx_ring[i].status = cpu_to_le64 (TFDDone); - np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma + - ((i+1)%TX_RING_SIZE) * - sizeof (struct netdev_desc)); - } - - /* Initialize Rx descriptors */ - for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma + - ((i + 1) % RX_RING_SIZE) * - sizeof (struct netdev_desc)); - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; - np->rx_skbuff[i] = NULL; - } - - /* Allocate the rx buffers */ - for (i = 0; i < RX_RING_SIZE; i++) { - /* Allocated fixed size of skbuff */ - struct sk_buff *skb; - - skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); - np->rx_skbuff[i] = skb; - if (skb == NULL) - break; - - /* Rubicon now supports 40 bits of addressing space. */ - np->rx_ring[i].fraginfo = - cpu_to_le64 ( pci_map_single ( - np->pdev, skb->data, np->rx_buf_sz, - PCI_DMA_FROMDEVICE)); - np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); - } - - /* Set RFDListPtr */ - dw32(RFDListPtr0, np->rx_ring_dma); - dw32(RFDListPtr1, 0); -} - static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { @@ -748,11 +817,6 @@ rio_interrupt (int irq, void *dev_instance) return IRQ_RETVAL(handled); } -static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) -{ - return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); -} - static void rio_free_tx (struct net_device *dev, int irq) { @@ -1730,44 +1794,16 @@ static int rio_close (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); - void __iomem *ioaddr = np->ioaddr; - struct pci_dev *pdev = np->pdev; - struct sk_buff *skb; - int i; netif_stop_queue (dev); - /* Disable interrupts */ - dw16(IntEnable, 0); - - /* Stop Tx and Rx logics */ - dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); + rio_hw_stop(dev); free_irq(pdev->irq, dev); del_timer_sync (&np->timer); - /* Free all the skbuffs in the queue. 
*/ - for (i = 0; i < RX_RING_SIZE; i++) { - skb = np->rx_skbuff[i]; - if (skb) { - pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]), - skb->len, PCI_DMA_FROMDEVICE); - dev_kfree_skb (skb); - np->rx_skbuff[i] = NULL; - } - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; - } - for (i = 0; i < TX_RING_SIZE; i++) { - skb = np->tx_skbuff[i]; - if (skb) { - pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]), - skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb (skb); - np->tx_skbuff[i] = NULL; - } - } + free_list(dev); return 0; } @@ -1795,11 +1831,55 @@ rio_remove1 (struct pci_dev *pdev) } } +#ifdef CONFIG_PM_SLEEP +static int rio_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct netdev_private *np = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + del_timer_sync(&np->timer); + rio_hw_stop(dev); + + return 0; +} + +static int rio_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct netdev_private *np = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + rio_reset_ring(np); + rio_hw_init(dev); + np->timer.expires = jiffies + 1 * HZ; + add_timer(&np->timer); + netif_device_attach(dev); + dl2k_enable_int(np); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); +#define RIO_PM_OPS (&rio_pm_ops) + +#else + +#define RIO_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + static struct pci_driver rio_driver = { .name = "dl2k", .id_table = rio_pci_tbl, .probe = rio_probe1, .remove = rio_remove1, + .driver.pm = RIO_PM_OPS, }; module_pci_driver(rio_driver); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 13d00a38a5bd..b69a9eacc531 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -255,15 +255,9 @@ static int dnet_mii_probe(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = NULL; - int phy_addr; /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (bp->mii_bus->phy_map[phy_addr]) { - phydev = bp->mii_bus->phy_map[phy_addr]; - break; - } - } + phydev = phy_find_first(bp->mii_bus); if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); @@ -274,11 +268,11 @@ static int dnet_mii_probe(struct net_device *dev) /* attach the mac to the phy */ if (bp->capabilities & DNET_HAS_RMII) { - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &dnet_handle_link_change, PHY_INTERFACE_MODE_RMII); } else { - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &dnet_handle_link_change, PHY_INTERFACE_MODE_MII); } @@ -308,7 +302,7 @@ static int dnet_mii_probe(struct net_device *dev) static int dnet_mii_init(struct dnet *bp) { - int err, i; + int err; bp->mii_bus = mdiobus_alloc(); if (bp->mii_bus == NULL) @@ -323,16 +317,6 @@ static int dnet_mii_init(struct dnet *bp) bp->mii_bus->priv = bp; - bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev, - sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!bp->mii_bus->irq) { - err = -ENOMEM; - goto err_out; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out; @@ -892,9 +876,7 @@ static int dnet_probe(struct platform_device *pdev) (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); phydev = bp->phy_dev; - dev_info(&pdev->dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 6ee78c203eca..cf837831304b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.6.0.3" +#define DRV_VER "11.0.0.0" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -521,7 +521,7 @@ struct be_adapter { struct be_drv_stats drv_stats; struct be_aic_obj aic_obj[MAX_EVT_QS]; u8 vlan_prio_bmap; /* Available Priority BitMap */ - u16 recommended_prio; /* Recommended Priority */ + u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ struct be_dma_mem stats_cmd; @@ -547,10 +547,6 @@ struct be_adapter { u32 beacon_state; /* for set_phys_id */ - bool eeh_error; - bool fw_timeout; - bool hw_error; - u32 port_num; char port_name; u8 mc_type; @@ -574,6 +570,8 @@ struct be_adapter { struct be_resources pool_res; /* resources available for the port */ struct be_resources res; /* resources available for the func */ u16 num_vfs; /* Number of VFs provisioned by PF */ + u8 pf_num; /* Numbering used by FW, starts at 0 */ + u8 vf_num; /* Numbering used by FW, starts at 1 */ u8 virtfn; struct be_vf_cfg *vf_cfg; bool be3_native; @@ -591,11 +589,10 @@ struct be_adapter { u32 msg_enable; int be_get_temp_freq; struct be_hwmon hwmon_info; - u8 pf_number; - u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; + u32 fat_dump_len; u16 serial_num[CNTL_SERIAL_NUM_WORDS]; }; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1795c935ff02..b63d8ad2e115 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -308,8 +308,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, if (evt->valid) { adapter->vlan_prio_bmap = evt->available_priority_bmap; - adapter->recommended_prio &= ~VLAN_PRIO_MASK; - adapter->recommended_prio = + adapter->recommended_prio_bits = evt->reco_default_priority << VLAN_PRIO_SHIFT; } } @@ -1713,49 +1712,40 @@ err: } /* Uses synchronous mcc */ -int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) +int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_get_fat *req; int status; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, - NULL); + OPCODE_COMMON_MANAGE_FAT, sizeof(*req), + &wrb, NULL); req->fat_operation = cpu_to_le32(QUERY_FAT); - status = be_mcc_notify_wait(adapter); + status = be_cmd_notify_wait(adapter, &wrb); if (!status) { - struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); + struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb); - if (log_size && resp->log_size) - *log_size = le32_to_cpu(resp->log_size) - + if (dump_size && resp->log_size) + *dump_size = le32_to_cpu(resp->log_size) - 
sizeof(u32); } -err: - spin_unlock_bh(&adapter->mcc_lock); return status; } -int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) +int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; - int status = 0; + int status; if (buf_len == 0) - return -EIO; + return 0; total_size = buf_len; @@ -1763,11 +1753,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, get_fat_cmd.size, &get_fat_cmd.dma, GFP_ATOMIC); - if (!get_fat_cmd.va) { - dev_err(&adapter->pdev->dev, - "Memory allocation failure while reading FAT data\n"); + if (!get_fat_cmd.va) return -ENOMEM; - } spin_lock_bh(&adapter->mcc_lock); @@ -2291,10 +2278,11 @@ err: return status; } -int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, - const char *obj_name, u32 *data_written, - u8 *change_status, u8 *addn_status) +static int lancer_cmd_write_object(struct be_adapter *adapter, + struct be_dma_mem *cmd, u32 data_size, + u32 data_offset, const char *obj_name, + u32 *data_written, u8 *change_status, + u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_write_object *req; @@ -2410,7 +2398,8 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter) return status; } -int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) +static int lancer_cmd_delete_object(struct be_adapter *adapter, + const char *obj_name) { struct lancer_cmd_req_delete_object *req; struct be_mcc_wrb *wrb; @@ -2485,9 +2474,9 @@ err_unlock: return status; } -int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 flash_type, u32 flash_opcode, u32 img_offset, - u32 buf_size) +static int be_cmd_write_flashrom(struct be_adapter *adapter, + struct be_dma_mem *cmd, u32 flash_type, + u32 flash_opcode, u32 img_offset, u32 buf_size) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; @@ -2533,8 +2522,8 @@ err_unlock: return status; } -int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 img_optype, u32 img_offset, u32 crc_offset) +static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, + u16 img_optype, u32 img_offset, u32 crc_offset) { struct be_cmd_read_flash_crc *req; struct be_mcc_wrb *wrb; @@ -2571,6 +2560,579 @@ err: return status; } +static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; + +static bool phy_flashing_required(struct be_adapter *adapter) +{ + return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && + adapter->phy.interface_type == PHY_TYPE_BASET_10GB); +} + +static bool is_comp_in_ufi(struct be_adapter *adapter, + struct flash_section_info *fsec, int type) +{ + int i = 0, img_type = 0; + struct flash_section_info_g2 *fsec_g2 = NULL; + + if (BE2_chip(adapter)) + fsec_g2 = (struct flash_section_info_g2 *)fsec; + + for (i = 0; i < MAX_FLASH_COMP; i++) { + if (fsec_g2) + img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); + else + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + + if (img_type == type) + return true; + } + return false; +} + +static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, + int header_size, + const struct firmware *fw) +{ + struct flash_section_info *fsec = NULL; + const u8 *p = fw->data; + + p += header_size; + while (p < (fw->data + fw->size)) { + 
fsec = (struct flash_section_info *)p; + if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) + return fsec; + p += 32; + } + return NULL; +} + +static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, + u32 img_offset, u32 img_size, int hdr_size, + u16 img_optype, bool *crc_match) +{ + u32 crc_offset; + int status; + u8 crc[4]; + + status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, + img_size - 4); + if (status) + return status; + + crc_offset = hdr_size + img_offset + img_size - 4; + + /* Skip flashing, if crc of flashed region matches */ + if (!memcmp(crc, p + crc_offset, 4)) + *crc_match = true; + else + *crc_match = false; + + return status; +} + +static int be_flash(struct be_adapter *adapter, const u8 *img, + struct be_dma_mem *flash_cmd, int optype, int img_size, + u32 img_offset) +{ + u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; + struct be_cmd_write_flashrom *req = flash_cmd->va; + int status; + + while (total_bytes) { + num_bytes = min_t(u32, 32 * 1024, total_bytes); + + total_bytes -= num_bytes; + + if (!total_bytes) { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_FLASH; + else + flash_op = FLASHROM_OPER_FLASH; + } else { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_SAVE; + else + flash_op = FLASHROM_OPER_SAVE; + } + + memcpy(req->data_buf, img, num_bytes); + img += num_bytes; + status = be_cmd_write_flashrom(adapter, flash_cmd, optype, + flash_op, img_offset + + bytes_sent, num_bytes); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && + optype == OPTYPE_PHY_FW) + break; + else if (status) + return status; + + bytes_sent += num_bytes; + } + return 0; +} + +/* For BE2, BE3 and BE3-R */ +static int be_flash_BEx(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + int status, i, filehdr_size, num_comp; + const struct flash_comp *pflashcomp; + bool crc_match; + const u8 *p; + + struct flash_comp gen3_flash_types[] = { + { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, + { BE3_REDBOOT_START, OPTYPE_REDBOOT, + BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, + { BE3_ISCSI_BIOS_START, OPTYPE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, + { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, + { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS, + BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, + { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, + { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, + { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, + BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}, + { BE3_NCSI_START, OPTYPE_NCSI_FW, + BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI}, + { BE3_PHY_FW_START, OPTYPE_PHY_FW, + BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY} + }; + + struct flash_comp gen2_flash_types[] = { + { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, + { BE2_REDBOOT_START, OPTYPE_REDBOOT, + BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, + { BE2_ISCSI_BIOS_START, OPTYPE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, + { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, + { BE2_FCOE_BIOS_START, 
OPTYPE_FCOE_BIOS, + BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, + { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, + { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, + { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, + BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE} + }; + + if (BE3_chip(adapter)) { + pflashcomp = gen3_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g3); + num_comp = ARRAY_SIZE(gen3_flash_types); + } else { + pflashcomp = gen2_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g2); + num_comp = ARRAY_SIZE(gen2_flash_types); + img_hdrs_size = 0; + } + + /* Get flash section info*/ + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); + return -1; + } + for (i = 0; i < num_comp; i++) { + if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) + continue; + + if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && + memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) + continue; + + if (pflashcomp[i].optype == OPTYPE_PHY_FW && + !phy_flashing_required(adapter)) + continue; + + if (pflashcomp[i].optype == OPTYPE_REDBOOT) { + status = be_check_flash_crc(adapter, fw->data, + pflashcomp[i].offset, + pflashcomp[i].size, + filehdr_size + + img_hdrs_size, + OPTYPE_REDBOOT, &crc_match); + if (status) { + dev_err(dev, + "Could not get CRC for 0x%x region\n", + pflashcomp[i].optype); + continue; + } + + if (crc_match) + continue; + } + + p = fw->data + filehdr_size + pflashcomp[i].offset + + img_hdrs_size; + if (p + pflashcomp[i].size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, + pflashcomp[i].size, 0); + if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + pflashcomp[i].img_type); + return status; + } + } + return 0; +} + +static u16 be_get_img_optype(struct flash_section_entry fsec_entry) +{ + u32 img_type = le32_to_cpu(fsec_entry.type); + u16 img_optype = le16_to_cpu(fsec_entry.optype); + + if (img_optype != 0xFFFF) + return img_optype; + + switch (img_type) { + case IMAGE_FIRMWARE_ISCSI: + img_optype = OPTYPE_ISCSI_ACTIVE; + break; + case IMAGE_BOOT_CODE: + img_optype = OPTYPE_REDBOOT; + break; + case IMAGE_OPTION_ROM_ISCSI: + img_optype = OPTYPE_BIOS; + break; + case IMAGE_OPTION_ROM_PXE: + img_optype = OPTYPE_PXE_BIOS; + break; + case IMAGE_OPTION_ROM_FCOE: + img_optype = OPTYPE_FCOE_BIOS; + break; + case IMAGE_FIRMWARE_BACKUP_ISCSI: + img_optype = OPTYPE_ISCSI_BACKUP; + break; + case IMAGE_NCSI: + img_optype = OPTYPE_NCSI_FW; + break; + case IMAGE_FLASHISM_JUMPVECTOR: + img_optype = OPTYPE_FLASHISM_JUMPVECTOR; + break; + case IMAGE_FIRMWARE_PHY: + img_optype = OPTYPE_SH_PHY_FW; + break; + case IMAGE_REDBOOT_DIR: + img_optype = OPTYPE_REDBOOT_DIR; + break; + case IMAGE_REDBOOT_CONFIG: + img_optype = OPTYPE_REDBOOT_CONFIG; + break; + case IMAGE_UFI_DIR: + img_optype = OPTYPE_UFI_DIR; + break; + default: + break; + } + + return img_optype; +} + +static int be_flash_skyhawk(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = num_of_images * sizeof(struct image_hdr); + bool crc_match, old_fw_img, flash_offset_support = true; + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + u32 img_offset, img_size, img_type; + u16 img_optype, flash_optype; + int status, i, filehdr_size; + const u8 
*p; + + filehdr_size = sizeof(struct flash_file_hdr_g3); + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); + return -EINVAL; + } + +retry_flash: + for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { + img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); + img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + img_optype = be_get_img_optype(fsec->fsec_entry[i]); + old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; + + if (img_optype == 0xFFFF) + continue; + + if (flash_offset_support) + flash_optype = OPTYPE_OFFSET_SPECIFIED; + else + flash_optype = img_optype; + + /* Don't bother verifying CRC if an old FW image is being + * flashed + */ + if (old_fw_img) + goto flash; + + status = be_check_flash_crc(adapter, fw->data, img_offset, + img_size, filehdr_size + + img_hdrs_size, flash_optype, + &crc_match); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || + base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { + /* The current FW image on the card does not support + * OFFSET based flashing. Retry using older mechanism + * of OPTYPE based flashing + */ + if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* The current FW image on the card does not recognize + * the new FLASH op_type. The FW download is partially + * complete. Reboot the server now to enable FW image + * to recognize the new FLASH op_type. To complete the + * remaining process, download the same FW again after + * the reboot. + */ + dev_err(dev, "Flash incomplete. Reset the server\n"); + dev_err(dev, "Download FW image again after reset\n"); + return -EAGAIN; + } else if (status) { + dev_err(dev, "Could not get CRC for 0x%x region\n", + img_optype); + return -EFAULT; + } + + if (crc_match) + continue; + +flash: + p = fw->data + filehdr_size + img_offset + img_hdrs_size; + if (p + img_size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, + img_offset); + + /* The current FW image on the card does not support OFFSET + * based flashing. 
Retry using older mechanism of OPTYPE based + * flashing + */ + if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && + flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* For old FW images ignore ILLEGAL_FIELD error or errors on + * UFI_DIR region + */ + if (old_fw_img && + (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || + (img_optype == OPTYPE_UFI_DIR && + base_status(status) == MCC_STATUS_FAILED))) { + continue; + } else if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + img_type); + + switch (addl_status(status)) { + case MCC_ADDL_STATUS_MISSING_SIGNATURE: + dev_err(dev, + "Digital signature missing in FW\n"); + return -EINVAL; + case MCC_ADDL_STATUS_INVALID_SIGNATURE: + dev_err(dev, + "Invalid digital signature in FW\n"); + return -EINVAL; + default: + return -EFAULT; + } + } + } + return 0; +} + +int lancer_fw_download(struct be_adapter *adapter, + const struct firmware *fw) +{ + struct device *dev = &adapter->pdev->dev; + struct be_dma_mem flash_cmd; + const u8 *data_ptr = NULL; + u8 *dest_image_ptr = NULL; + size_t image_size = 0; + u32 chunk_size = 0; + u32 data_written = 0; + u32 offset = 0; + int status = 0; + u8 add_status = 0; + u8 change_status; + + if (!IS_ALIGNED(fw->size, sizeof(u32))) { + dev_err(dev, "FW image size should be multiple of 4\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + + LANCER_FW_DOWNLOAD_CHUNK; + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + dest_image_ptr = flash_cmd.va + + sizeof(struct lancer_cmd_req_write_object); + image_size = fw->size; + data_ptr = fw->data; + + while (image_size) { + chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); + + /* Copy the image chunk content. */ + memcpy(dest_image_ptr, data_ptr, chunk_size); + + status = lancer_cmd_write_object(adapter, &flash_cmd, + chunk_size, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + if (status) + break; + + offset += data_written; + data_ptr += data_written; + image_size -= data_written; + } + + if (!status) { + /* Commit the FW written */ + status = lancer_cmd_write_object(adapter, &flash_cmd, + 0, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (status) { + dev_err(dev, "Firmware load error\n"); + return be_cmd_status(status); + } + + dev_info(dev, "Firmware flashed successfully\n"); + + if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(dev, "Resetting adapter to activate new FW\n"); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); + if (status) { + dev_err(dev, "Adapter busy, could not reset FW\n"); + dev_err(dev, "Reboot server to activate new FW\n"); + } + } else if (change_status != LANCER_NO_RESET_NEEDED) { + dev_info(dev, "Reboot server to activate new FW\n"); + } + + return 0; +} + +/* Check if the flash image file is compatible with the adapter that + * is being flashed. + */ +static bool be_check_ufi_compatibility(struct be_adapter *adapter, + struct flash_file_hdr_g3 *fhdr) +{ + if (!fhdr) { + dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); + return false; + } + + /* First letter of the build version is used to identify + * which chip this image file is meant for. 
+ */ + switch (fhdr->build[0]) { + case BLD_STR_UFI_TYPE_SH: + if (!skyhawk_chip(adapter)) + return false; + break; + case BLD_STR_UFI_TYPE_BE3: + if (!BE3_chip(adapter)) + return false; + break; + case BLD_STR_UFI_TYPE_BE2: + if (!BE2_chip(adapter)) + return false; + break; + default: + return false; + } + + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. + * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); +} + +int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) +{ + struct device *dev = &adapter->pdev->dev; + struct flash_file_hdr_g3 *fhdr3; + struct image_hdr *img_hdr_ptr; + int status = 0, i, num_imgs; + struct be_dma_mem flash_cmd; + + fhdr3 = (struct flash_file_hdr_g3 *)fw->data; + if (!be_check_ufi_compatibility(adapter, fhdr3)) { + dev_err(dev, "Flash image is not compatible with adapter\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct be_cmd_write_flashrom); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { + img_hdr_ptr = (struct image_hdr *)(fw->data + + (sizeof(struct flash_file_hdr_g3) + + i * sizeof(struct image_hdr))); + if (!BE2_chip(adapter) && + le32_to_cpu(img_hdr_ptr->imageid) != 1) + continue; + + if (skyhawk_chip(adapter)) + status = be_flash_skyhawk(adapter, fw, &flash_cmd, + num_imgs); + else + status = be_flash_BEx(adapter, fw, &flash_cmd, + num_imgs); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (!status) + dev_info(dev, "Firmware flashed successfully\n"); + + return status; +} + int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd) { @@ -2892,7 +3454,6 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; - adapter->pci_func_num = attribs->pci_func_num; serial_num = attribs->hba_attribs.controller_serial_number; for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & @@ -3575,14 +4136,16 @@ int be_cmd_query_port_name(struct be_adapter *adapter) return status; } -/* Descriptor type */ -enum { - FUNC_DESC = 1, - VFT_DESC = 2 -}; - +/* When more than 1 NIC descriptor is present in the descriptor list, + * the caller must specify the pf_num to obtain the NIC descriptor + * corresponding to its pci function. + * get_vft must be true when the caller wants the VF-template desc of the + * PF-pool. + * The pf_num should be set to PF_NUM_IGNORE when the caller knows + * that only it's NIC descriptor is present in the descriptor list. 
+ */ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, - int desc_type) + bool get_vft, u8 pf_num) { struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; struct be_nic_res_desc *nic; @@ -3592,40 +4155,42 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { nic = (struct be_nic_res_desc *)hdr; - if (desc_type == FUNC_DESC || - (desc_type == VFT_DESC && - nic->flags & (1 << VFT_SHIFT))) + + if ((pf_num == PF_NUM_IGNORE || + nic->pf_num == pf_num) && + (!get_vft || nic->flags & BIT(VFT_SHIFT))) return nic; } - hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; hdr = (void *)hdr + hdr->desc_len; } return NULL; } -static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) +static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count, + u8 pf_num) { - return be_get_nic_desc(buf, desc_count, VFT_DESC); + return be_get_nic_desc(buf, desc_count, true, pf_num); } -static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) +static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count, + u8 pf_num) { - return be_get_nic_desc(buf, desc_count, FUNC_DESC); + return be_get_nic_desc(buf, desc_count, false, pf_num); } -static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, - u32 desc_count) +static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count, + u8 pf_num) { struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; struct be_pcie_res_desc *pcie; int i; for (i = 0; i < desc_count; i++) { - if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || - hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { - pcie = (struct be_pcie_res_desc *)hdr; - if (pcie->pf_num == devfn) + if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_num == pf_num) return pcie; } @@ -3710,13 +4275,23 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) u32 desc_count = le32_to_cpu(resp->desc_count); struct be_nic_res_desc *desc; - desc = be_get_func_nic_desc(resp->func_param, desc_count); + /* GET_FUNC_CONFIG returns resource descriptors of the + * current function only. So, pf_num should be set to + * PF_NUM_IGNORE. 
+ */ + desc = be_get_func_nic_desc(resp->func_param, desc_count, + PF_NUM_IGNORE); if (!desc) { status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; - be_copy_nic_desc(res, desc); + + /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */ + adapter->pf_num = desc->pf_num; + adapter->vf_num = desc->vf_num; + + if (res) + be_copy_nic_desc(res, desc); } err: mutex_unlock(&adapter->mbox_lock); @@ -3726,10 +4301,7 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created - * non-zero domain => a PF is querying this on behalf of a VF - * zero domain => a PF or a VF is querying this for itself - */ +/* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3759,12 +4331,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; - /* When a function is querying profile information relating to - * itself hdr.pf_number must be set to it's pci_func_num + 1 - */ req->hdr.domain = domain; - if (domain == 0) - req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the * descriptors with all bits set to "1" for the fields which can be @@ -3780,8 +4347,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, resp = cmd.va; desc_count = le16_to_cpu(resp->desc_count); - pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, - desc_count); + pcie = be_get_pcie_desc(resp->func_param, desc_count, + adapter->pf_num); if (pcie) res->max_vfs = le16_to_cpu(pcie->num_vfs); @@ -3789,11 +4356,13 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (port) adapter->mc_type = port->mc_type; - nic = be_get_func_nic_desc(resp->func_param, desc_count); + nic = be_get_func_nic_desc(resp->func_param, desc_count, + adapter->pf_num); if (nic) be_copy_nic_desc(res, nic); - vf_res = be_get_vft_desc(resp->func_param, desc_count); + vf_res = be_get_vft_desc(resp->func_param, desc_count, + adapter->pf_num); if (vf_res) res->vf_if_cap_flags = vf_res->cap_flags; err: @@ -3883,7 +4452,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, return be_cmd_set_qos(adapter, max_rate / 10, domain); be_reset_nic_desc(&nic_desc); - nic_desc.pf_num = adapter->pf_number; + nic_desc.pf_num = adapter->pf_num; nic_desc.vf_num = domain; nic_desc.bw_min = 0; if (lancer_chip(adapter)) { @@ -4260,16 +4829,13 @@ err: return status; } -int be_cmd_set_logical_link_config(struct be_adapter *adapter, - int link_state, u8 domain) +int __be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, int version, u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_ll_link *req; int status; - if (BEx_chip(adapter) || lancer_chip(adapter)) - return -EOPNOTSUPP; - spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -4284,14 +4850,15 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter, OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, sizeof(*req), wrb, NULL); - req->hdr.version = 1; + req->hdr.version = version; req->hdr.domain = domain; - if (link_state == IFLA_VF_LINK_STATE_ENABLE) - req->link_config |= 1; + if (link_state == IFLA_VF_LINK_STATE_ENABLE || + link_state == IFLA_VF_LINK_STATE_AUTO) + req->link_config |= PLINK_ENABLE; if (link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= 1 << PLINK_TRACK_SHIFT; + req->link_config |= PLINK_TRACK; status = 
be_mcc_notify_wait(adapter); err: @@ -4299,6 +4866,25 @@ err: return status; } +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain) +{ + int status; + + if (BEx_chip(adapter)) + return -EOPNOTSUPP; + + status = __be_cmd_set_logical_link_config(adapter, link_state, + 2, domain); + + /* Version 2 of the command will not be recognized by older FW. + * On such a failure issue version 1 of the command. + */ + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST) + status = __be_cmd_set_logical_link_config(adapter, link_state, + 1, domain); + return status; +} int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 91155ea74f34..241819b36ca7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -66,7 +66,9 @@ enum mcc_addl_status { MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16, MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d, MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a, - MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab + MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab, + MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56, + MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57 }; #define CQE_BASE_STATUS_MASK 0xFFFF @@ -289,9 +291,7 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd1; /* dword 3 */ - u8 pf_num; /* dword 3 */ - u8 rsvd2; /* dword 3 */ + u8 rsvd[3]; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1207,68 +1207,85 @@ struct be_cmd_resp_get_beacon_state { /* Flashrom related descriptors */ #define MAX_FLASH_COMP 32 -#define OPTYPE_ISCSI_ACTIVE 0 -#define OPTYPE_REDBOOT 1 -#define OPTYPE_BIOS 2 -#define OPTYPE_PXE_BIOS 3 -#define OPTYPE_OFFSET_SPECIFIED 7 -#define OPTYPE_FCOE_BIOS 8 -#define OPTYPE_ISCSI_BACKUP 9 -#define OPTYPE_FCOE_FW_ACTIVE 10 -#define OPTYPE_FCOE_FW_BACKUP 11 -#define OPTYPE_NCSI_FW 13 -#define OPTYPE_REDBOOT_DIR 18 -#define OPTYPE_REDBOOT_CONFIG 19 -#define OPTYPE_SH_PHY_FW 21 -#define OPTYPE_FLASHISM_JUMPVECTOR 22 -#define OPTYPE_UFI_DIR 23 -#define OPTYPE_PHY_FW 99 - -#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */ -#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */ -#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */ - -#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144 -#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144 -#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */ -#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */ -#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */ - -/* Offsets for components on Flash. 
*/ -#define FLASH_REDBOOT_START_g2 0 -#define FLASH_FCoE_BIOS_START_g2 524288 -#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576 -#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296 -#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016 -#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736 -#define FLASH_iSCSI_BIOS_START_g2 7340032 -#define FLASH_PXE_BIOS_START_g2 7864320 - -#define FLASH_REDBOOT_START_g3 262144 -#define FLASH_PHY_FW_START_g3 1310720 -#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152 -#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304 -#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456 -#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608 -#define FLASH_iSCSI_BIOS_START_g3 12582912 -#define FLASH_PXE_BIOS_START_g3 13107200 -#define FLASH_FCoE_BIOS_START_g3 13631488 -#define FLASH_NCSI_START_g3 15990784 - -#define IMAGE_NCSI 16 -#define IMAGE_OPTION_ROM_PXE 32 -#define IMAGE_OPTION_ROM_FCoE 33 -#define IMAGE_OPTION_ROM_ISCSI 34 -#define IMAGE_FLASHISM_JUMPVECTOR 48 -#define IMAGE_FIRMWARE_iSCSI 160 -#define IMAGE_FIRMWARE_FCoE 162 -#define IMAGE_FIRMWARE_BACKUP_iSCSI 176 -#define IMAGE_FIRMWARE_BACKUP_FCoE 178 -#define IMAGE_FIRMWARE_PHY 192 -#define IMAGE_REDBOOT_DIR 208 -#define IMAGE_REDBOOT_CONFIG 209 -#define IMAGE_UFI_DIR 210 -#define IMAGE_BOOT_CODE 224 +/* Optypes of each component in the UFI */ +enum { + OPTYPE_ISCSI_ACTIVE = 0, + OPTYPE_REDBOOT = 1, + OPTYPE_BIOS = 2, + OPTYPE_PXE_BIOS = 3, + OPTYPE_OFFSET_SPECIFIED = 7, + OPTYPE_FCOE_BIOS = 8, + OPTYPE_ISCSI_BACKUP = 9, + OPTYPE_FCOE_FW_ACTIVE = 10, + OPTYPE_FCOE_FW_BACKUP = 11, + OPTYPE_NCSI_FW = 13, + OPTYPE_REDBOOT_DIR = 18, + OPTYPE_REDBOOT_CONFIG = 19, + OPTYPE_SH_PHY_FW = 21, + OPTYPE_FLASHISM_JUMPVECTOR = 22, + OPTYPE_UFI_DIR = 23, + OPTYPE_PHY_FW = 99 +}; + +/* Maximum sizes of components in BE2 FW UFI */ +enum { + BE2_BIOS_COMP_MAX_SIZE = 0x40000, + BE2_REDBOOT_COMP_MAX_SIZE = 0x40000, + BE2_COMP_MAX_SIZE = 0x140000 +}; + +/* Maximum sizes of components in BE3 FW UFI */ +enum { + BE3_NCSI_COMP_MAX_SIZE = 0x40000, + BE3_PHY_FW_COMP_MAX_SIZE = 0x40000, + BE3_BIOS_COMP_MAX_SIZE = 0x80000, + BE3_REDBOOT_COMP_MAX_SIZE = 0x100000, + BE3_COMP_MAX_SIZE = 0x200000 +}; + +/* Offsets for components in BE2 FW UFI */ +enum { + BE2_REDBOOT_START = 0x8000, + BE2_FCOE_BIOS_START = 0x80000, + BE2_ISCSI_PRIMARY_IMAGE_START = 0x100000, + BE2_ISCSI_BACKUP_IMAGE_START = 0x240000, + BE2_FCOE_PRIMARY_IMAGE_START = 0x380000, + BE2_FCOE_BACKUP_IMAGE_START = 0x4c0000, + BE2_ISCSI_BIOS_START = 0x700000, + BE2_PXE_BIOS_START = 0x780000 +}; + +/* Offsets for components in BE3 FW UFI */ +enum { + BE3_REDBOOT_START = 0x40000, + BE3_PHY_FW_START = 0x140000, + BE3_ISCSI_PRIMARY_IMAGE_START = 0x200000, + BE3_ISCSI_BACKUP_IMAGE_START = 0x400000, + BE3_FCOE_PRIMARY_IMAGE_START = 0x600000, + BE3_FCOE_BACKUP_IMAGE_START = 0x800000, + BE3_ISCSI_BIOS_START = 0xc00000, + BE3_PXE_BIOS_START = 0xc80000, + BE3_FCOE_BIOS_START = 0xd00000, + BE3_NCSI_START = 0xf40000 +}; + +/* Component entry types */ +enum { + IMAGE_NCSI = 0x10, + IMAGE_OPTION_ROM_PXE = 0x20, + IMAGE_OPTION_ROM_FCOE = 0x21, + IMAGE_OPTION_ROM_ISCSI = 0x22, + IMAGE_FLASHISM_JUMPVECTOR = 0x30, + IMAGE_FIRMWARE_ISCSI = 0xa0, + IMAGE_FIRMWARE_FCOE = 0xa2, + IMAGE_FIRMWARE_BACKUP_ISCSI = 0xb0, + IMAGE_FIRMWARE_BACKUP_FCOE = 0xb2, + IMAGE_FIRMWARE_PHY = 0xc0, + IMAGE_REDBOOT_DIR = 0xd0, + IMAGE_REDBOOT_CONFIG = 0xd1, + IMAGE_UFI_DIR = 0xd2, + IMAGE_BOOT_CODE = 0xe2 +}; struct controller_id { u32 vendor; @@ -1394,6 +1411,9 @@ struct be_cmd_read_flash_crc { } __packed; /**************** Lancer Firmware 
Flash ************/ +#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) +#define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct amap_lancer_write_obj_context { u8 write_length[24]; u8 reserved1[7]; @@ -1654,11 +1674,7 @@ struct mgmt_hba_attribs { struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[2]; - u16 rsvd1; - u8 pci_func_num; - u8 rsvd2; - u32 rsvd3[7]; + u32 rsvd0[10]; } __packed; struct be_cmd_req_cntl_attribs { @@ -2083,6 +2099,7 @@ struct be_port_res_desc { #define NV_TYPE_VXLAN 3 #define SOCVID_SHIFT 2 /* Strip outer vlan */ #define RCVID_SHIFT 4 /* Report vlan */ +#define PF_NUM_IGNORE 255 u8 nv_flags; u8 rsvd2; __le16 nv_port; /* vxlan/gre port */ @@ -2246,7 +2263,8 @@ struct be_cmd_resp_get_iface_list { }; /*************** Set logical link ********************/ -#define PLINK_TRACK_SHIFT 8 +#define PLINK_ENABLE BIT(0) +#define PLINK_TRACK BIT(8) struct be_cmd_req_set_ll_link { struct be_cmd_req_hdr hdr; u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */ @@ -2321,19 +2339,11 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, u8 page_num, u8 *data); int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_query_sfp_info(struct be_adapter *adapter); -int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 flash_oper, u32 flash_opcode, u32 img_offset, - u32 buf_size); -int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, - const char *obj_name, u32 *data_written, - u8 *change_status, u8 *addn_status); int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_read, u32 *eof, u8 *addn_status); -int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name); -int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 img_optype, u32 img_offset, u32 crc_offset); +int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw); +int be_fw_download(struct be_adapter *adapter, const struct firmware *fw); int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd); int be_cmd_fw_init(struct be_adapter *adapter); @@ -2355,9 +2365,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, void be_detect_error(struct be_adapter *adapter); int be_cmd_get_die_temperature(struct be_adapter *adapter); int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size); +int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_req_native_mode(struct be_adapter *adapter); -int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); -int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 734f655c99c1..a19ac441336f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -241,17 +241,28 @@ static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) u32 data_read = 0, eof; u8 addn_status; struct be_dma_mem data_len_cmd; - int status; memset(&data_len_cmd, 0, sizeof(data_len_cmd)); /* data_offset and data_size should be 0 to 
get reg len */ - status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, - file_name, &data_read, &eof, - &addn_status); + lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name, + &data_read, &eof, &addn_status); return data_read; } +static int be_get_dump_len(struct be_adapter *adapter) +{ + u32 dump_size = 0; + + if (lancer_chip(adapter)) + dump_size = lancer_cmd_get_file_len(adapter, + LANCER_FW_DUMP_FILE); + else + dump_size = adapter->fat_dump_len; + + return dump_size; +} + static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, u32 buf_len, void *buf) { @@ -293,37 +304,18 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, return status; } -static int be_get_reg_len(struct net_device *netdev) +static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len, + void *buf) { - struct be_adapter *adapter = netdev_priv(netdev); - u32 log_size = 0; - - if (!check_privilege(adapter, MAX_PRIVILEGES)) - return 0; - - if (be_physfn(adapter)) { - if (lancer_chip(adapter)) - log_size = lancer_cmd_get_file_len(adapter, - LANCER_FW_DUMP_FILE); - else - be_cmd_get_reg_len(adapter, &log_size); - } - return log_size; -} + int status = 0; -static void -be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) -{ - struct be_adapter *adapter = netdev_priv(netdev); + if (lancer_chip(adapter)) + status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, + dump_len, buf); + else + status = be_cmd_get_fat_dump(adapter, dump_len, buf); - if (be_physfn(adapter)) { - memset(buf, 0, regs->len); - if (lancer_chip(adapter)) - lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, - regs->len, buf); - else - be_cmd_get_regs(adapter, regs->len, buf); - } + return status; } static int be_get_coalesce(struct net_device *netdev, @@ -916,6 +908,34 @@ static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) return be_load_fw(adapter, efl->data); } +static int +be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + dump->len = be_get_dump_len(adapter); + dump->version = 1; + dump->flag = 0x1; /* FW dump is enabled */ + return 0; +} + +static int +be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buf) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_read_dump_data(adapter, dump->len, buf); + return be_cmd_status(status); +} + static int be_get_eeprom_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1313,8 +1333,6 @@ const struct ethtool_ops be_ethtool_ops = { .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, .get_ethtool_stats = be_get_ethtool_stats, - .get_regs_len = be_get_reg_len, - .get_regs = be_get_regs, .flash_device = be_do_flash, .self_test = be_self_test, .get_rxnfc = be_get_rxnfc, @@ -1323,6 +1341,8 @@ const struct ethtool_ops be_ethtool_ops = { .get_rxfh_key_size = be_get_rxfh_key_size, .get_rxfh = be_get_rxfh, .set_rxfh = be_set_rxfh, + .get_dump_flag = be_get_dump_flag, + .get_dump_data = be_get_dump_data, .get_channels = be_get_channels, .set_channels = be_set_channels, .get_module_info = be_get_module_info, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 8a1d9fffd7d6..f99de3657ce3 100644 --- 
a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -729,7 +729,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, /* If vlan priority provided by OS is NOT in available bmap */ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | - adapter->recommended_prio; + adapter->recommended_prio_bits; return vlan_tag; } @@ -2184,7 +2184,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); skb->csum_level = rxcp->tunneled; - skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -2631,7 +2630,6 @@ static int be_evt_queues_create(struct be_adapter *adapter) eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); } return 0; } @@ -4204,10 +4202,17 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + status = be_cmd_query_fw_cfg(adapter); if (status) return status; + if (!lancer_chip(adapter) && be_physfn(adapter)) + be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len); + if (BEx_chip(adapter)) { level = be_cmd_get_fw_log_level(adapter); adapter->msg_enable = @@ -4402,10 +4407,14 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); - /* Need to invoke this cmd first to get the PCI Function Number */ - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; + /* invoke this cmd first to get pf_num and vf_num which are needed + * for issuing profile related cmds + */ + if (!BEx_chip(adapter)) { + status = be_cmd_get_func_config(adapter, NULL); + if (status) + return status; + } if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4490,570 +4499,6 @@ static void be_netpoll(struct net_device *netdev) } #endif -static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; - -static bool phy_flashing_required(struct be_adapter *adapter) -{ - return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && - adapter->phy.interface_type == PHY_TYPE_BASET_10GB); -} - -static bool is_comp_in_ufi(struct be_adapter *adapter, - struct flash_section_info *fsec, int type) -{ - int i = 0, img_type = 0; - struct flash_section_info_g2 *fsec_g2 = NULL; - - if (BE2_chip(adapter)) - fsec_g2 = (struct flash_section_info_g2 *)fsec; - - for (i = 0; i < MAX_FLASH_COMP; i++) { - if (fsec_g2) - img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); - else - img_type = le32_to_cpu(fsec->fsec_entry[i].type); - - if (img_type == type) - return true; - } - return false; - -} - -static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, - int header_size, - const struct firmware *fw) -{ - struct flash_section_info *fsec = NULL; - const u8 *p = fw->data; - - p += header_size; - while (p < (fw->data + fw->size)) { - fsec = (struct flash_section_info *)p; - if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) - return fsec; - p += 32; - } - return NULL; -} - -static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, - u32 img_offset, u32 img_size, int hdr_size, - u16 img_optype, bool *crc_match) -{ - u32 crc_offset; - int status; - u8 crc[4]; - - status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, - img_size - 4); - if (status) - return status; - - crc_offset = hdr_size + 
img_offset + img_size - 4; - - /* Skip flashing, if crc of flashed region matches */ - if (!memcmp(crc, p + crc_offset, 4)) - *crc_match = true; - else - *crc_match = false; - - return status; -} - -static int be_flash(struct be_adapter *adapter, const u8 *img, - struct be_dma_mem *flash_cmd, int optype, int img_size, - u32 img_offset) -{ - u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; - struct be_cmd_write_flashrom *req = flash_cmd->va; - int status; - - while (total_bytes) { - num_bytes = min_t(u32, 32*1024, total_bytes); - - total_bytes -= num_bytes; - - if (!total_bytes) { - if (optype == OPTYPE_PHY_FW) - flash_op = FLASHROM_OPER_PHY_FLASH; - else - flash_op = FLASHROM_OPER_FLASH; - } else { - if (optype == OPTYPE_PHY_FW) - flash_op = FLASHROM_OPER_PHY_SAVE; - else - flash_op = FLASHROM_OPER_SAVE; - } - - memcpy(req->data_buf, img, num_bytes); - img += num_bytes; - status = be_cmd_write_flashrom(adapter, flash_cmd, optype, - flash_op, img_offset + - bytes_sent, num_bytes); - if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && - optype == OPTYPE_PHY_FW) - break; - else if (status) - return status; - - bytes_sent += num_bytes; - } - return 0; -} - -/* For BE2, BE3 and BE3-R */ -static int be_flash_BEx(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) -{ - int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); - struct device *dev = &adapter->pdev->dev; - struct flash_section_info *fsec = NULL; - int status, i, filehdr_size, num_comp; - const struct flash_comp *pflashcomp; - bool crc_match; - const u8 *p; - - struct flash_comp gen3_flash_types[] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI}, - { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE}, - { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI}, - { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE}, - { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE}, - { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE}, - { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW, - FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI}, - { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW, - FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY} - }; - - struct flash_comp gen2_flash_types[] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI}, - { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE}, - { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI}, - { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE}, - { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE}, - { FLASH_FCoE_BACKUP_IMAGE_START_g2, 
OPTYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} - }; - - if (BE3_chip(adapter)) { - pflashcomp = gen3_flash_types; - filehdr_size = sizeof(struct flash_file_hdr_g3); - num_comp = ARRAY_SIZE(gen3_flash_types); - } else { - pflashcomp = gen2_flash_types; - filehdr_size = sizeof(struct flash_file_hdr_g2); - num_comp = ARRAY_SIZE(gen2_flash_types); - img_hdrs_size = 0; - } - - /* Get flash section info*/ - fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); - if (!fsec) { - dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); - return -1; - } - for (i = 0; i < num_comp; i++) { - if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) - continue; - - if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && - memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) - continue; - - if (pflashcomp[i].optype == OPTYPE_PHY_FW && - !phy_flashing_required(adapter)) - continue; - - if (pflashcomp[i].optype == OPTYPE_REDBOOT) { - status = be_check_flash_crc(adapter, fw->data, - pflashcomp[i].offset, - pflashcomp[i].size, - filehdr_size + - img_hdrs_size, - OPTYPE_REDBOOT, &crc_match); - if (status) { - dev_err(dev, - "Could not get CRC for 0x%x region\n", - pflashcomp[i].optype); - continue; - } - - if (crc_match) - continue; - } - - p = fw->data + filehdr_size + pflashcomp[i].offset + - img_hdrs_size; - if (p + pflashcomp[i].size > fw->data + fw->size) - return -1; - - status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, - pflashcomp[i].size, 0); - if (status) { - dev_err(dev, "Flashing section type 0x%x failed\n", - pflashcomp[i].img_type); - return status; - } - } - return 0; -} - -static u16 be_get_img_optype(struct flash_section_entry fsec_entry) -{ - u32 img_type = le32_to_cpu(fsec_entry.type); - u16 img_optype = le16_to_cpu(fsec_entry.optype); - - if (img_optype != 0xFFFF) - return img_optype; - - switch (img_type) { - case IMAGE_FIRMWARE_iSCSI: - img_optype = OPTYPE_ISCSI_ACTIVE; - break; - case IMAGE_BOOT_CODE: - img_optype = OPTYPE_REDBOOT; - break; - case IMAGE_OPTION_ROM_ISCSI: - img_optype = OPTYPE_BIOS; - break; - case IMAGE_OPTION_ROM_PXE: - img_optype = OPTYPE_PXE_BIOS; - break; - case IMAGE_OPTION_ROM_FCoE: - img_optype = OPTYPE_FCOE_BIOS; - break; - case IMAGE_FIRMWARE_BACKUP_iSCSI: - img_optype = OPTYPE_ISCSI_BACKUP; - break; - case IMAGE_NCSI: - img_optype = OPTYPE_NCSI_FW; - break; - case IMAGE_FLASHISM_JUMPVECTOR: - img_optype = OPTYPE_FLASHISM_JUMPVECTOR; - break; - case IMAGE_FIRMWARE_PHY: - img_optype = OPTYPE_SH_PHY_FW; - break; - case IMAGE_REDBOOT_DIR: - img_optype = OPTYPE_REDBOOT_DIR; - break; - case IMAGE_REDBOOT_CONFIG: - img_optype = OPTYPE_REDBOOT_CONFIG; - break; - case IMAGE_UFI_DIR: - img_optype = OPTYPE_UFI_DIR; - break; - default: - break; - } - - return img_optype; -} - -static int be_flash_skyhawk(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) -{ - int img_hdrs_size = num_of_images * sizeof(struct image_hdr); - bool crc_match, old_fw_img, flash_offset_support = true; - struct device *dev = &adapter->pdev->dev; - struct flash_section_info *fsec = NULL; - u32 img_offset, img_size, img_type; - u16 img_optype, flash_optype; - int status, i, filehdr_size; - const u8 *p; - - filehdr_size = sizeof(struct flash_file_hdr_g3); - fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); - if (!fsec) { - dev_err(dev, "Invalid Cookie. 
FW image may be corrupted\n"); - return -EINVAL; - } - -retry_flash: - for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { - img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); - img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); - img_type = le32_to_cpu(fsec->fsec_entry[i].type); - img_optype = be_get_img_optype(fsec->fsec_entry[i]); - old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; - - if (img_optype == 0xFFFF) - continue; - - if (flash_offset_support) - flash_optype = OPTYPE_OFFSET_SPECIFIED; - else - flash_optype = img_optype; - - /* Don't bother verifying CRC if an old FW image is being - * flashed - */ - if (old_fw_img) - goto flash; - - status = be_check_flash_crc(adapter, fw->data, img_offset, - img_size, filehdr_size + - img_hdrs_size, flash_optype, - &crc_match); - if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || - base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { - /* The current FW image on the card does not support - * OFFSET based flashing. Retry using older mechanism - * of OPTYPE based flashing - */ - if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { - flash_offset_support = false; - goto retry_flash; - } - - /* The current FW image on the card does not recognize - * the new FLASH op_type. The FW download is partially - * complete. Reboot the server now to enable FW image - * to recognize the new FLASH op_type. To complete the - * remaining process, download the same FW again after - * the reboot. - */ - dev_err(dev, "Flash incomplete. Reset the server\n"); - dev_err(dev, "Download FW image again after reset\n"); - return -EAGAIN; - } else if (status) { - dev_err(dev, "Could not get CRC for 0x%x region\n", - img_optype); - return -EFAULT; - } - - if (crc_match) - continue; - -flash: - p = fw->data + filehdr_size + img_offset + img_hdrs_size; - if (p + img_size > fw->data + fw->size) - return -1; - - status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, - img_offset); - - /* The current FW image on the card does not support OFFSET - * based flashing. 
Retry using older mechanism of OPTYPE based - * flashing - */ - if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && - flash_optype == OPTYPE_OFFSET_SPECIFIED) { - flash_offset_support = false; - goto retry_flash; - } - - /* For old FW images ignore ILLEGAL_FIELD error or errors on - * UFI_DIR region - */ - if (old_fw_img && - (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || - (img_optype == OPTYPE_UFI_DIR && - base_status(status) == MCC_STATUS_FAILED))) { - continue; - } else if (status) { - dev_err(dev, "Flashing section type 0x%x failed\n", - img_type); - return -EFAULT; - } - } - return 0; -} - -static int lancer_fw_download(struct be_adapter *adapter, - const struct firmware *fw) -{ -#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) -#define LANCER_FW_DOWNLOAD_LOCATION "/prg" - struct device *dev = &adapter->pdev->dev; - struct be_dma_mem flash_cmd; - const u8 *data_ptr = NULL; - u8 *dest_image_ptr = NULL; - size_t image_size = 0; - u32 chunk_size = 0; - u32 data_written = 0; - u32 offset = 0; - int status = 0; - u8 add_status = 0; - u8 change_status; - - if (!IS_ALIGNED(fw->size, sizeof(u32))) { - dev_err(dev, "FW image size should be multiple of 4\n"); - return -EINVAL; - } - - flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) - + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); - if (!flash_cmd.va) - return -ENOMEM; - - dest_image_ptr = flash_cmd.va + - sizeof(struct lancer_cmd_req_write_object); - image_size = fw->size; - data_ptr = fw->data; - - while (image_size) { - chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); - - /* Copy the image chunk content. */ - memcpy(dest_image_ptr, data_ptr, chunk_size); - - status = lancer_cmd_write_object(adapter, &flash_cmd, - chunk_size, offset, - LANCER_FW_DOWNLOAD_LOCATION, - &data_written, &change_status, - &add_status); - if (status) - break; - - offset += data_written; - data_ptr += data_written; - image_size -= data_written; - } - - if (!status) { - /* Commit the FW written */ - status = lancer_cmd_write_object(adapter, &flash_cmd, - 0, offset, - LANCER_FW_DOWNLOAD_LOCATION, - &data_written, &change_status, - &add_status); - } - - dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); - if (status) { - dev_err(dev, "Firmware load error\n"); - return be_cmd_status(status); - } - - dev_info(dev, "Firmware flashed successfully\n"); - - if (change_status == LANCER_FW_RESET_NEEDED) { - dev_info(dev, "Resetting adapter to activate new FW\n"); - status = lancer_physdev_ctrl(adapter, - PHYSDEV_CONTROL_FW_RESET_MASK); - if (status) { - dev_err(dev, "Adapter busy, could not reset FW\n"); - dev_err(dev, "Reboot server to activate new FW\n"); - } - } else if (change_status != LANCER_NO_RESET_NEEDED) { - dev_info(dev, "Reboot server to activate new FW\n"); - } - - return 0; -} - -/* Check if the flash image file is compatible with the adapter that - * is being flashed. - */ -static bool be_check_ufi_compatibility(struct be_adapter *adapter, - struct flash_file_hdr_g3 *fhdr) -{ - if (!fhdr) { - dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); - return false; - } - - /* First letter of the build version is used to identify - * which chip this image file is meant for. 
- */ - switch (fhdr->build[0]) { - case BLD_STR_UFI_TYPE_SH: - if (!skyhawk_chip(adapter)) - return false; - break; - case BLD_STR_UFI_TYPE_BE3: - if (!BE3_chip(adapter)) - return false; - break; - case BLD_STR_UFI_TYPE_BE2: - if (!BE2_chip(adapter)) - return false; - break; - default: - return false; - } - - /* In BE3 FW images the "asic_type_rev" field doesn't track the - * asic_rev of the chips it is compatible with. - * When asic_type_rev is 0 the image is compatible only with - * pre-BE3-R chips (asic_rev < 0x10) - */ - if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) - return adapter->asic_rev < 0x10; - else - return (fhdr->asic_type_rev >= adapter->asic_rev); -} - -static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) -{ - struct device *dev = &adapter->pdev->dev; - struct flash_file_hdr_g3 *fhdr3; - struct image_hdr *img_hdr_ptr; - int status = 0, i, num_imgs; - struct be_dma_mem flash_cmd; - - fhdr3 = (struct flash_file_hdr_g3 *)fw->data; - if (!be_check_ufi_compatibility(adapter, fhdr3)) { - dev_err(dev, "Flash image is not compatible with adapter\n"); - return -EINVAL; - } - - flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); - if (!flash_cmd.va) - return -ENOMEM; - - num_imgs = le32_to_cpu(fhdr3->num_imgs); - for (i = 0; i < num_imgs; i++) { - img_hdr_ptr = (struct image_hdr *)(fw->data + - (sizeof(struct flash_file_hdr_g3) + - i * sizeof(struct image_hdr))); - if (!BE2_chip(adapter) && - le32_to_cpu(img_hdr_ptr->imageid) != 1) - continue; - - if (skyhawk_chip(adapter)) - status = be_flash_skyhawk(adapter, fw, &flash_cmd, - num_imgs); - else - status = be_flash_BEx(adapter, fw, &flash_cmd, - num_imgs); - } - - dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); - if (!status) - dev_info(dev, "Firmware flashed successfully\n"); - - return status; -} - int be_load_fw(struct be_adapter *adapter, u8 *fw_file) { const struct firmware *fw; @@ -5108,6 +4553,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return -EINVAL; mode = nla_get_u16(attr); + if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA) + return -EOPNOTSUPP; + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; @@ -5289,7 +4737,7 @@ static netdev_features_t be_features_check(struct sk_buff *skb, skb->inner_protocol != htons(ETH_P_TEB) || skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ff665493ca97..62fa136554ac 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -678,7 +678,7 @@ static int ethoc_mdio_probe(struct net_device *dev) int err; if (priv->phy_id != -1) - phy = priv->mdio->phy_map[priv->phy_id]; + phy = mdiobus_get_phy(priv->mdio, priv->phy_id); else phy = phy_find_first(priv->mdio); @@ -766,7 +766,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (mdio->phy_id >= PHY_MAX_ADDR) return -ERANGE; - phy = priv->mdio->phy_map[mdio->phy_id]; + phy = mdiobus_get_phy(priv->mdio, mdio->phy_id); if (!phy) return -ENODEV; } else { @@ -1015,7 +1015,6 @@ static int ethoc_probe(struct platform_device *pdev) struct resource *mmio = NULL; struct resource *mem = NULL; struct ethoc *priv = NULL; - unsigned 
int phy; int num_bd; int ret = 0; bool random_mac = false; @@ -1206,19 +1205,10 @@ static int ethoc_probe(struct platform_device *pdev) priv->mdio->write = ethoc_mdio_write; priv->mdio->priv = priv; - priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mdio->irq) { - ret = -ENOMEM; - goto free_mdio; - } - - for (phy = 0; phy < PHY_MAX_ADDR; phy++) - priv->mdio->irq[phy] = PHY_POLL; - ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); - goto free_mdio; + goto free; } ret = ethoc_mdio_probe(netdev); @@ -1250,8 +1240,6 @@ error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); -free_mdio: - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: if (priv->clk) diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 6d0c5d5eea6d..84384e1585a5 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -71,7 +71,6 @@ struct ftgmac100 { struct napi_struct napi; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; struct phy_device *phydev; int old_speed; }; @@ -835,26 +834,15 @@ static void ftgmac100_adjust_link(struct net_device *netdev) static int ftgmac100_mii_probe(struct ftgmac100 *priv) { struct net_device *netdev = priv->netdev; - struct phy_device *phydev = NULL; - int i; - - /* search for connect PHY device */ - for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *tmp = priv->mii_bus->phy_map[i]; - - if (tmp) { - phydev = tmp; - break; - } - } + struct phy_device *phydev; - /* now we are supposed to have a proper phydev, to attach to... */ + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_info(netdev, "%s: no PHY found\n", netdev->name); return -ENODEV; } - phydev = phy_connect(netdev, dev_name(&phydev->dev), + phydev = phy_connect(netdev, phydev_name(phydev), &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phydev)) { @@ -1188,7 +1176,6 @@ static int ftgmac100_probe(struct platform_device *pdev) struct net_device *netdev; struct ftgmac100 *priv; int err; - int i; if (!pdev) return -ENODEV; @@ -1257,10 +1244,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->mii_bus->priv = netdev; priv->mii_bus->read = ftgmac100_mdiobus_read; priv->mii_bus->write = ftgmac100_mdiobus_write; - priv->mii_bus->irq = priv->phy_irq; - - for (i = 0; i < PHY_MAX_ADDR; i++) - priv->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(priv->mii_bus); if (err) { diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index bee32a9d9876..d1ca45fbb164 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -54,6 +54,7 @@ config FEC_MPC52xx_MDIO If compiled as module, it will be called fec_mpc52xx_phy. 
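
Several hunks in this range (ethoc and ftgmac100 above, fec_main just below) converge on the same phylib helpers: the open-coded walk of mii_bus->phy_map and the per-driver irq[] tables go away in favour of phy_find_first()/mdiobus_get_phy(), phydev_name() and phy_attached_info(), with the MDIO core now supplying the PHY_POLL-initialised irq table. A minimal sketch of the resulting probe pattern, assuming a driver that has already registered its mii_bus; example_mii_probe() and example_adjust_link are illustrative names, not taken from any of the drivers touched here:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_mii_probe(struct net_device *ndev, struct mii_bus *bus,
                             void (*example_adjust_link)(struct net_device *))
{
        struct phy_device *phydev;

        /* replaces the manual scan over bus->phy_map[0..PHY_MAX_ADDR-1] */
        phydev = phy_find_first(bus);
        if (!phydev)
                return -ENODEV;

        /* phydev_name() replaces dev_name(&phydev->dev) as the bus id */
        phydev = phy_connect(ndev, phydev_name(phydev),
                             example_adjust_link, PHY_INTERFACE_MODE_GMII);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        /* replaces the hand-rolled "attached PHY driver [...]" prints */
        phy_attached_info(phydev);

        return 0;
}
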
source "drivers/net/ethernet/freescale/fs_enet/Kconfig" +source "drivers/net/ethernet/freescale/fman/Kconfig" config FSL_PQ_MDIO tristate "Freescale PQ MDIO" diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 71debd1c18c9..4097c58d17a7 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -17,3 +17,5 @@ gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o + +obj-$(CONFIG_FSL_FMAN) += fman/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2a32209ffbf..502da6f48f95 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -48,6 +48,7 @@ #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/mdio.h> #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> @@ -1926,11 +1927,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if ((fep->mii_bus->phy_mask & (1 << phy_id))) - continue; - if (fep->mii_bus->phy_map[phy_id] == NULL) - continue; - if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) continue; if (dev_id--) continue; @@ -1972,9 +1969,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); + phy_attached_info(phy_dev); return 0; } @@ -1985,7 +1980,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *node; - int err = -ENXIO, i; + int err = -ENXIO; u32 mii_speed, holdtime; /* @@ -2067,15 +2062,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; - fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!fep->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - fep->mii_bus->irq[i] = PHY_POLL; - node = of_get_child_by_name(pdev->dev.of_node, "mdio"); if (node) { err = of_mdiobus_register(fep->mii_bus, node); @@ -2085,7 +2071,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) } if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; mii_cnt++; @@ -2095,8 +2081,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) return 0; -err_out_free_mdio_irq: - kfree(fep->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: @@ -2107,7 +2091,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) { if (--mii_cnt == 0) { mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); mdiobus_free(fep->mii_bus); } } @@ -3277,7 +3260,6 @@ static void fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) { struct device_node *np = pdev->dev.of_node; - int err; *num_tx = *num_rx = 1; @@ -3285,13 +3267,9 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) return; /* parse the num of tx and rx queues */ - err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); - if (err) - *num_tx = 1; + 
of_property_read_u32(np, "fsl,num-tx-queues", num_tx); - err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); - if (err) - *num_rx = 1; + of_property_read_u32(np, "fsl,num-rx-queues", num_rx); if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index afe7f39cdd7c..25553ee857b4 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1084,27 +1084,23 @@ static struct platform_driver mpc52xx_fec_driver = { /* Module */ /* ======================================================================== */ +static struct platform_driver * const drivers[] = { +#ifdef CONFIG_FEC_MPC52xx_MDIO + &mpc52xx_fec_mdio_driver, +#endif + &mpc52xx_fec_driver, +}; + static int __init mpc52xx_fec_init(void) { -#ifdef CONFIG_FEC_MPC52xx_MDIO - int ret; - ret = platform_driver_register(&mpc52xx_fec_mdio_driver); - if (ret) { - pr_err("failed to register mdio driver\n"); - return ret; - } -#endif - return platform_driver_register(&mpc52xx_fec_driver); + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit mpc52xx_fec_exit(void) { - platform_driver_unregister(&mpc52xx_fec_driver); -#ifdef CONFIG_FEC_MPC52xx_MDIO - platform_driver_unregister(&mpc52xx_fec_mdio_driver); -#endif + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 1e647beaf989..b5497e308302 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -22,7 +22,6 @@ struct mpc52xx_fec_mdio_priv { struct mpc52xx_fec __iomem *regs; - int mdio_irqs[PHY_MAX_ADDR]; }; static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, @@ -83,9 +82,6 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) bus->read = mpc52xx_fec_mdio_read; bus->write = mpc52xx_fec_mdio_write; - /* setup irqs */ - bus->irq = priv->mdio_irqs; - /* setup registers */ err = of_address_to_resource(np, 0, &res); if (err) diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig new file mode 100644 index 000000000000..79b7c84b7869 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -0,0 +1,9 @@ +config FSL_FMAN + tristate "FMan support" + depends on FSL_SOC || COMPILE_TEST + select GENERIC_ALLOCATOR + select PHYLIB + default n + help + Freescale Data-Path Acceleration Architecture Frame Manager + (FMan) support diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile new file mode 100644 index 000000000000..51fd2e6c1b84 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Makefile @@ -0,0 +1,7 @@ +subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman + +obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o + +fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o +fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o +fsl_mac-objs += mac.o diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c new file mode 100644 index 000000000000..623aa1c8ebc6 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -0,0 +1,2871 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman.h" +#include "fman_muram.h" + +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/clk.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/interrupt.h> +#include <linux/libfdt_env.h> + +/* General defines */ +#define FMAN_LIODN_TBL 64 /* size of LIODN table */ +#define MAX_NUM_OF_MACS 10 +#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 +#define BASE_RX_PORTID 0x08 +#define BASE_TX_PORTID 0x28 + +/* Modules registers offsets */ +#define BMI_OFFSET 0x00080000 +#define QMI_OFFSET 0x00080400 +#define DMA_OFFSET 0x000C2000 +#define FPM_OFFSET 0x000C3000 +#define IMEM_OFFSET 0x000C4000 +#define CGP_OFFSET 0x000DB000 + +/* Exceptions bit map */ +#define EX_DMA_BUS_ERROR 0x80000000 +#define EX_DMA_READ_ECC 0x40000000 +#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000 +#define EX_DMA_FM_WRITE_ECC 0x10000000 +#define EX_FPM_STALL_ON_TASKS 0x08000000 +#define EX_FPM_SINGLE_ECC 0x04000000 +#define EX_FPM_DOUBLE_ECC 0x02000000 +#define EX_QMI_SINGLE_ECC 0x01000000 +#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000 +#define EX_QMI_DOUBLE_ECC 0x00400000 +#define EX_BMI_LIST_RAM_ECC 0x00200000 +#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000 +#define EX_BMI_STATISTICS_RAM_ECC 0x00080000 +#define EX_IRAM_ECC 0x00040000 +#define EX_MURAM_ECC 0x00020000 +#define EX_BMI_DISPATCH_RAM_ECC 0x00010000 +#define EX_DMA_SINGLE_PORT_ECC 0x00008000 + +/* DMA defines */ +/* masks */ +#define DMA_MODE_BER 0x00200000 +#define DMA_MODE_ECC 0x00000020 +#define DMA_MODE_SECURE_PROT 0x00000800 +#define DMA_MODE_AXI_DBG_MASK 0x0F000000 + +#define DMA_TRANSFER_PORTID_MASK 0xFF000000 +#define DMA_TRANSFER_TNUM_MASK 0x00FF0000 +#define DMA_TRANSFER_LIODN_MASK 0x00000FFF + 
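
The DMA_TRANSFER_*_MASK defines above (and the matching shifts that follow) are consumed much later in this file, in dma_err_event(), to decode the transfer-bus communication ID register (fmdmtcid) into the offending port, TNUM and LIODN. A standalone, runnable illustration of that decode, using a made-up register value; the constants are copied from the defines in this hunk:

#include <stdint.h>
#include <stdio.h>

#define DMA_TRANSFER_PORTID_MASK   0xFF000000
#define DMA_TRANSFER_TNUM_MASK     0x00FF0000
#define DMA_TRANSFER_LIODN_MASK    0x00000FFF
#define DMA_TRANSFER_PORTID_SHIFT  24
#define DMA_TRANSFER_TNUM_SHIFT    16

int main(void)
{
        uint32_t com_id = 0x2a050123;   /* hypothetical fmdmtcid value */
        unsigned int port_id = (com_id & DMA_TRANSFER_PORTID_MASK) >>
                               DMA_TRANSFER_PORTID_SHIFT;
        unsigned int tnum = (com_id & DMA_TRANSFER_TNUM_MASK) >>
                            DMA_TRANSFER_TNUM_SHIFT;
        unsigned int liodn = com_id & DMA_TRANSFER_LIODN_MASK;

        /* prints: port_id=0x2a tnum=0x5 liodn=0x123 */
        printf("port_id=0x%x tnum=0x%x liodn=0x%x\n", port_id, tnum, liodn);
        return 0;
}
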
+#define DMA_STATUS_BUS_ERR 0x08000000 +#define DMA_STATUS_READ_ECC 0x04000000 +#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000 +#define DMA_STATUS_FM_WRITE_ECC 0x01000000 +#define DMA_STATUS_FM_SPDAT_ECC 0x00080000 + +#define DMA_MODE_CACHE_OR_SHIFT 30 +#define DMA_MODE_AXI_DBG_SHIFT 24 +#define DMA_MODE_CEN_SHIFT 13 +#define DMA_MODE_CEN_MASK 0x00000007 +#define DMA_MODE_DBG_SHIFT 7 +#define DMA_MODE_AID_MODE_SHIFT 4 + +#define DMA_THRESH_COMMQ_SHIFT 24 +#define DMA_THRESH_READ_INT_BUF_SHIFT 16 +#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f +#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f + +#define DMA_TRANSFER_PORTID_SHIFT 24 +#define DMA_TRANSFER_TNUM_SHIFT 16 + +#define DMA_CAM_SIZEOF_ENTRY 0x40 +#define DMA_CAM_UNITS 8 + +#define DMA_LIODN_SHIFT 16 +#define DMA_LIODN_BASE_MASK 0x00000FFF + +/* FPM defines */ +#define FPM_EV_MASK_DOUBLE_ECC 0x80000000 +#define FPM_EV_MASK_STALL 0x40000000 +#define FPM_EV_MASK_SINGLE_ECC 0x20000000 +#define FPM_EV_MASK_RELEASE_FM 0x00010000 +#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000 +#define FPM_EV_MASK_STALL_EN 0x00004000 +#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000 +#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008 +#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004 + +#define FPM_RAM_MURAM_ECC 0x00008000 +#define FPM_RAM_IRAM_ECC 0x00004000 +#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000 +#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000 +#define FPM_RAM_IRAM_ECC_EN 0x40000000 +#define FPM_RAM_RAMS_ECC_EN 0x80000000 +#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000 + +#define FPM_REV1_MAJOR_MASK 0x0000FF00 +#define FPM_REV1_MINOR_MASK 0x000000FF + +#define FPM_DISP_LIMIT_SHIFT 24 + +#define FPM_PRT_FM_CTL1 0x00000001 +#define FPM_PRT_FM_CTL2 0x00000002 +#define FPM_PORT_FM_CTL_PORTID_SHIFT 24 +#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16 + +#define FPM_THR1_PRS_SHIFT 24 +#define FPM_THR1_KG_SHIFT 16 +#define FPM_THR1_PLCR_SHIFT 8 +#define FPM_THR1_BMI_SHIFT 0 + +#define FPM_THR2_QMI_ENQ_SHIFT 24 +#define FPM_THR2_QMI_DEQ_SHIFT 0 +#define FPM_THR2_FM_CTL1_SHIFT 16 +#define FPM_THR2_FM_CTL2_SHIFT 8 + +#define FPM_EV_MASK_CAT_ERR_SHIFT 1 +#define FPM_EV_MASK_DMA_ERR_SHIFT 0 + +#define FPM_REV1_MAJOR_SHIFT 8 + +#define FPM_RSTC_FM_RESET 0x80000000 +#define FPM_RSTC_MAC0_RESET 0x40000000 +#define FPM_RSTC_MAC1_RESET 0x20000000 +#define FPM_RSTC_MAC2_RESET 0x10000000 +#define FPM_RSTC_MAC3_RESET 0x08000000 +#define FPM_RSTC_MAC8_RESET 0x04000000 +#define FPM_RSTC_MAC4_RESET 0x02000000 +#define FPM_RSTC_MAC5_RESET 0x01000000 +#define FPM_RSTC_MAC6_RESET 0x00800000 +#define FPM_RSTC_MAC7_RESET 0x00400000 +#define FPM_RSTC_MAC9_RESET 0x00200000 + +#define FPM_TS_INT_SHIFT 16 +#define FPM_TS_CTL_EN 0x80000000 + +/* BMI defines */ +#define BMI_INIT_START 0x80000000 +#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000 +#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000 +#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000 +#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000 +#define BMI_NUM_OF_TASKS_MASK 0x3F000000 +#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000 +#define BMI_NUM_OF_DMAS_MASK 0x00000F00 +#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F +#define BMI_FIFO_SIZE_MASK 0x000003FF +#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000 +#define BMI_CFG2_DMAS_MASK 0x0000003F +#define BMI_CFG2_TASKS_MASK 0x0000003F + +#define BMI_CFG2_TASKS_SHIFT 16 +#define BMI_CFG2_DMAS_SHIFT 0 +#define BMI_CFG1_FIFO_SIZE_SHIFT 16 +#define BMI_NUM_OF_TASKS_SHIFT 24 +#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16 +#define BMI_NUM_OF_DMAS_SHIFT 8 +#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0 + +#define BMI_FIFO_ALIGN 
0x100 + +#define BMI_EXTRA_FIFO_SIZE_SHIFT 16 + +/* QMI defines */ +#define QMI_CFG_ENQ_EN 0x80000000 +#define QMI_CFG_DEQ_EN 0x40000000 +#define QMI_CFG_EN_COUNTERS 0x10000000 +#define QMI_CFG_DEQ_MASK 0x0000003F +#define QMI_CFG_ENQ_MASK 0x00003F00 +#define QMI_CFG_ENQ_SHIFT 8 + +#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000 +#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000 +#define QMI_INTR_EN_SINGLE_ECC 0x80000000 + +#define QMI_GS_HALT_NOT_BUSY 0x00000002 + +/* IRAM defines */ +#define IRAM_IADD_AIE 0x80000000 +#define IRAM_READY 0x80000000 + +/* Default values */ +#define DEFAULT_CATASTROPHIC_ERR 0 +#define DEFAULT_DMA_ERR 0 +#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM +#define DEFAULT_DMA_COMM_Q_LOW 0x2A +#define DEFAULT_DMA_COMM_Q_HIGH 0x3F +#define DEFAULT_CACHE_OVERRIDE 0 +#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64 +#define DEFAULT_DMA_DBG_CNT_MODE 0 +#define DEFAULT_DMA_SOS_EMERGENCY 0 +#define DEFAULT_DMA_WATCHDOG 0 +#define DEFAULT_DISP_LIMIT 0 +#define DEFAULT_PRS_DISP_TH 16 +#define DEFAULT_PLCR_DISP_TH 16 +#define DEFAULT_KG_DISP_TH 16 +#define DEFAULT_BMI_DISP_TH 16 +#define DEFAULT_QMI_ENQ_DISP_TH 16 +#define DEFAULT_QMI_DEQ_DISP_TH 16 +#define DEFAULT_FM_CTL1_DISP_TH 16 +#define DEFAULT_FM_CTL2_DISP_TH 16 + +#define DFLT_AXI_DBG_NUM_OF_BEATS 1 + +#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) / 2) +#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) * 3 / 4) +#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \ + ((dma_thresh_max_buf + 1) / 2) +#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\ + ((dma_thresh_max_buf + 1) * 3 / 4) + +#define DMA_COMM_Q_LOW_FMAN_V3 0x2A +#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \ + ((dma_thresh_max_commq + 1) / 2) +#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \ + ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \ + DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)) + +#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f +#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \ + ((dma_thresh_max_commq + 1) * 3 / 4) +#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \ + ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \ + DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)) + +#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59 +#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124 +#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \ + ((major == 6) ? ((minor == 1 || minor == 4) ? \ + TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \ + bmi_max_num_of_tasks) + +#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64 +#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32 +#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \ + (major == 6 ? 
DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \ + DMA_CAM_NUM_OF_ENTRIES_FMAN_V2) + +#define FM_TIMESTAMP_1_USEC_BIT 8 + +/* Defines used for enabling/disabling FMan interrupts */ +#define ERR_INTR_EN_DMA 0x00010000 +#define ERR_INTR_EN_FPM 0x80000000 +#define ERR_INTR_EN_BMI 0x00800000 +#define ERR_INTR_EN_QMI 0x00400000 +#define ERR_INTR_EN_MURAM 0x00040000 +#define ERR_INTR_EN_MAC0 0x00004000 +#define ERR_INTR_EN_MAC1 0x00002000 +#define ERR_INTR_EN_MAC2 0x00001000 +#define ERR_INTR_EN_MAC3 0x00000800 +#define ERR_INTR_EN_MAC4 0x00000400 +#define ERR_INTR_EN_MAC5 0x00000200 +#define ERR_INTR_EN_MAC6 0x00000100 +#define ERR_INTR_EN_MAC7 0x00000080 +#define ERR_INTR_EN_MAC8 0x00008000 +#define ERR_INTR_EN_MAC9 0x00000040 + +#define INTR_EN_QMI 0x40000000 +#define INTR_EN_MAC0 0x00080000 +#define INTR_EN_MAC1 0x00040000 +#define INTR_EN_MAC2 0x00020000 +#define INTR_EN_MAC3 0x00010000 +#define INTR_EN_MAC4 0x00000040 +#define INTR_EN_MAC5 0x00000020 +#define INTR_EN_MAC6 0x00000008 +#define INTR_EN_MAC7 0x00000002 +#define INTR_EN_MAC8 0x00200000 +#define INTR_EN_MAC9 0x00100000 +#define INTR_EN_REV0 0x00008000 +#define INTR_EN_REV1 0x00004000 +#define INTR_EN_REV2 0x00002000 +#define INTR_EN_REV3 0x00001000 +#define INTR_EN_TMR 0x01000000 + +enum fman_dma_aid_mode { + FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */ + FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */ +}; + +struct fman_iram_regs { + u32 iadd; /* FM IRAM instruction address register */ + u32 idata; /* FM IRAM instruction data register */ + u32 itcfg; /* FM IRAM timing config register */ + u32 iready; /* FM IRAM ready register */ +}; + +struct fman_fpm_regs { + u32 fmfp_tnc; /* FPM TNUM Control 0x00 */ + u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */ + u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */ + u32 fmfp_mxd; /* FPM Flush Control 0x0c */ + u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */ + u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */ + u32 fm_epi; /* FM Error Pending Interrupts 0x18 */ + u32 fm_rie; /* FM Error Interrupt Enable 0x1c */ + u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */ + u32 res0030[4]; /* res 0x30 - 0x3f */ + u32 fmfp_cee[4]; /* PM FMan-Controller Event 1-4 0x40-0x4f */ + u32 res0050[4]; /* res 0x50-0x5f */ + u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */ + u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */ + u32 fmfp_tsp; /* FPM Time Stamp 0x68 */ + u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */ + u32 fm_rcr; /* FM Rams Control 0x70 */ + u32 fmfp_extc; /* FPM External Requests Control 0x74 */ + u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */ + u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */ + u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */ + u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */ + u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */ + u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */ + u32 fm_rstc; /* FM Reset Command 0xcc */ + u32 fm_cld; /* FM Classifier Debug 0xd0 */ + u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */ + u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */ + u32 fmfp_ee; /* FPM Event&Mask 0xdc */ + u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */ + u32 res00f0[4]; /* res 0xf0-0xff */ + u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */ + u32 res01c8[14]; /* res 0x1c8-0x1ff */ + u32 fmfp_clfabc; /* FPM CLFABC 0x200 */ + u32 fmfp_clfcc; /* FPM CLFCC 0x204 */ + u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */ + u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */ + u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */ + u32 fmfp_clfamsk; /* FPM CLFAMSK 
0x214 */ + u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */ + u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */ + u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */ + u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */ + u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */ + u32 fmfp_decceh; /* FPM DECCEH 0x22c */ + u32 res0230[116]; /* res 0x230 - 0x3ff */ + u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */ + u32 res0600[0x400 - 384]; +}; + +struct fman_bmi_regs { + u32 fmbm_init; /* BMI Initialization 0x00 */ + u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */ + u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */ + u32 res000c[5]; /* 0x0c - 0x1f */ + u32 fmbm_ievr; /* Interrupt Event Register 0x20 */ + u32 fmbm_ier; /* Interrupt Enable Register 0x24 */ + u32 fmbm_ifr; /* Interrupt Force Register 0x28 */ + u32 res002c[5]; /* 0x2c - 0x3f */ + u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */ + u32 res0060[12]; /* 0x60 - 0x8f */ + u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */ + u32 res009c; /* 0x9c */ + u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */ + u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */ + u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */ + u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */ + u32 res0200; /* 0x200 */ + u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */ + u32 res0300; /* 0x300 */ + u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */ +}; + +struct fman_qmi_regs { + u32 fmqm_gc; /* General Configuration Register 0x00 */ + u32 res0004; /* 0x04 */ + u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */ + u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */ + u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */ + u32 fmqm_ie; /* Interrupt Event Register 0x14 */ + u32 fmqm_ien; /* Interrupt Enable Register 0x18 */ + u32 fmqm_if; /* Interrupt Force Register 0x1c */ + u32 fmqm_gs; /* Global Status Register 0x20 */ + u32 fmqm_ts; /* Task Status Register 0x24 */ + u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */ + u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */ + u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */ + u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */ + u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */ + u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */ + u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */ + u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */ + u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */ + u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */ + u32 res0050[7]; /* 0x50 - 0x6b */ + u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */ + u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */ + u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */ + u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */ + u32 res007c; /* 0x7c */ + u32 fmqm_dtc; /* 0x80 Debug Trap Counter 0x80 */ + u32 fmqm_efddd; /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */ + u32 res0088[2]; /* 0x88 - 0x8f */ + struct { + u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */ + u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */ + u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */ + u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */ + u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */ + u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */ + u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */ + u32 res001c; /* 0x1c */ + } dbg_traps[3]; /* 0x90 - 0xef */ + u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */ +}; + +struct fman_dma_regs { + u32 fmdmsr; /* FM DMA status register 0x00 */ + u32 fmdmmr; /* FM DMA mode register 0x04 */ + u32 fmdmtr; /* FM DMA 
bus threshold register 0x08 */ + u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */ + u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */ + u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */ + u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */ + u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */ + u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */ + u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */ + u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */ + u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */ + u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */ + u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */ + u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */ + u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */ + u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */ + u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */ + u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */ + u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */ + u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */ + u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */ + u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */ + u32 res005c; /* 0x5c */ + u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */ + u32 res00e0[0x400 - 56]; +}; + +/* Structure that holds current FMan state. + * Used for saving run time information. + */ +struct fman_state_struct { + u8 fm_id; + u16 fm_clk_freq; + struct fman_rev_info rev_info; + bool enabled_time_stamp; + u8 count1_micro_bit; + u8 total_num_of_tasks; + u8 accumulated_num_of_tasks; + u32 accumulated_fifo_size; + u8 accumulated_num_of_open_dmas; + u8 accumulated_num_of_deq_tnums; + u32 exceptions; + u32 extra_fifo_pool_size; + u8 extra_tasks_pool_size; + u8 extra_open_dmas_pool_size; + u16 port_mfl[MAX_NUM_OF_MACS]; + u16 mac_mfl[MAX_NUM_OF_MACS]; + + /* SOC specific */ + u32 fm_iram_size; + /* DMA */ + u32 dma_thresh_max_commq; + u32 dma_thresh_max_buf; + u32 max_num_of_open_dmas; + /* QMI */ + u32 qmi_max_num_of_tnums; + u32 qmi_def_tnums_thresh; + /* BMI */ + u32 bmi_max_num_of_tasks; + u32 bmi_max_fifo_size; + /* General */ + u32 fm_port_num_of_cg; + u32 num_of_rx_ports; + u32 total_fifo_size; + + u32 qman_channel_base; + u32 num_of_qman_channels; + + struct resource *res; +}; + +/* Structure that holds FMan initial configuration */ +struct fman_cfg { + u8 disp_limit_tsh; + u8 prs_disp_tsh; + u8 plcr_disp_tsh; + u8 kg_disp_tsh; + u8 bmi_disp_tsh; + u8 qmi_enq_disp_tsh; + u8 qmi_deq_disp_tsh; + u8 fm_ctl1_disp_tsh; + u8 fm_ctl2_disp_tsh; + int dma_cache_override; + enum fman_dma_aid_mode dma_aid_mode; + u32 dma_axi_dbg_num_of_beats; + u32 dma_cam_num_of_entries; + u32 dma_watchdog; + u8 dma_comm_qtsh_asrt_emer; + u32 dma_write_buf_tsh_asrt_emer; + u32 dma_read_buf_tsh_asrt_emer; + u8 dma_comm_qtsh_clr_emer; + u32 dma_write_buf_tsh_clr_emer; + u32 dma_read_buf_tsh_clr_emer; + u32 dma_sos_emergency; + int dma_dbg_cnt_mode; + int catastrophic_err; + int dma_err; + u32 exceptions; + u16 clk_freq; + u32 cam_base_addr; + u32 fifo_base_addr; + u32 total_fifo_size; + u32 total_num_of_tasks; + u32 qmi_def_tnums_thresh; +}; + +/* Structure that holds information received from device tree */ +struct fman_dts_params { + void __iomem *base_addr; /* FMan virtual address */ + struct resource *res; /* FMan memory resource */ + u8 id; /* FMan ID */ + + int err_irq; /* FMan Error IRQ */ + + u16 clk_freq; /* 
FMan clock freq (In Mhz) */ + + u32 qman_channel_base; /* QMan channels base */ + u32 num_of_qman_channels; /* Number of QMan channels */ + + struct resource muram_res; /* MURAM resource */ +}; + +/** fman_exceptions_cb + * fman - Pointer to FMan + * exception - The exception. + * + * Exceptions user callback routine, will be called upon an exception + * passing the exception identification. + * + * Return: irq status + */ +typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman, + enum fman_exceptions exception); + +/** fman_bus_error_cb + * fman - Pointer to FMan + * port_id - Port id + * addr - Address that caused the error + * tnum - Owner of error + * liodn - Logical IO device number + * + * Bus error user callback routine, will be called upon bus error, + * passing parameters describing the errors and the owner. + * + * Return: IRQ status + */ +typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id, + u64 addr, u8 tnum, u16 liodn); + +struct fman { + struct device *dev; + void __iomem *base_addr; + struct fman_intr_src intr_mng[FMAN_EV_CNT]; + + struct fman_fpm_regs __iomem *fpm_regs; + struct fman_bmi_regs __iomem *bmi_regs; + struct fman_qmi_regs __iomem *qmi_regs; + struct fman_dma_regs __iomem *dma_regs; + fman_exceptions_cb *exception_cb; + fman_bus_error_cb *bus_error_cb; + /* Spinlock for FMan use */ + spinlock_t spinlock; + struct fman_state_struct *state; + + struct fman_cfg *cfg; + struct muram_info *muram; + /* cam section in muram */ + int cam_offset; + size_t cam_size; + /* Fifo in MURAM */ + int fifo_offset; + size_t fifo_size; + + u32 liodn_base[64]; + u32 liodn_offset[64]; + + struct fman_dts_params dts_params; +}; + +static irqreturn_t fman_exceptions(struct fman *fman, + enum fman_exceptions exception) +{ + dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n", + __func__, fman->state->fm_id, exception); + + return IRQ_HANDLED; +} + +static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id, + u64 __maybe_unused addr, + u8 __maybe_unused tnum, + u16 __maybe_unused liodn) +{ + dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n", + __func__, fman->state->fm_id, port_id); + + return IRQ_HANDLED; +} + +static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id) +{ + if (fman->intr_mng[id].isr_cb) { + fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id) +{ + u8 sw_port_id = 0; + + if (hw_port_id >= BASE_TX_PORTID) + sw_port_id = hw_port_id - BASE_TX_PORTID; + else if (hw_port_id >= BASE_RX_PORTID) + sw_port_id = hw_port_id - BASE_RX_PORTID; + else + sw_port_id = 0; + + return sw_port_id; +} + +static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg, + u8 port_id) +{ + u32 tmp = 0; + + tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT; + + tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1; + + /* order restoration */ + if (port_id % 2) + tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; + else + tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT; + + iowrite32be(tmp, &fpm_rg->fmfp_prc); +} + +static void set_port_liodn(struct fman *fman, u8 port_id, + u32 liodn_base, u32 liodn_ofst) +{ + u32 tmp; + + /* set LIODN base for this port */ + tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]); + if (port_id % 2) { + tmp &= ~DMA_LIODN_BASE_MASK; + tmp |= liodn_base; + } else { + tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT); + tmp |= liodn_base << DMA_LIODN_SHIFT; + } + 
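/* Note on the read-modify-write above: each fmdmplr[] word carries the
 * LIODN base for two ports (the even port in bits 16..27 via DMA_LIODN_SHIFT,
 * the odd port in the low 12 bits covered by DMA_LIODN_BASE_MASK), so only
 * this port's half of the word is touched before it is written back below,
 * together with the per-port LIODN offset in the BMI fmbm_spliodn[] table.
 */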
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]); + iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]); +} + +static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fm_rcr); + if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) + iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); + else + iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN | + FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); +} + +static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fm_rcr); + if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL) + iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr); + else + iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN), + &fpm_rg->fm_rcr); +} + +static void fman_defconfig(struct fman_cfg *cfg) +{ + memset(cfg, 0, sizeof(struct fman_cfg)); + + cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR; + cfg->dma_err = DEFAULT_DMA_ERR; + cfg->dma_aid_mode = DEFAULT_AID_MODE; + cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW; + cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH; + cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE; + cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES; + cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE; + cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY; + cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG; + cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT; + cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH; + cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH; + cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH; + cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH; + cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH; + cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH; + cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH; + cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH; +} + +static int dma_init(struct fman *fman) +{ + struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; + struct fman_cfg *cfg = fman->cfg; + u32 tmp_reg; + + /* Init DMA Registers */ + + /* clear status reg events */ + tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC | + DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC); + iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr); + + /* configure mode register */ + tmp_reg = 0; + tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT; + if (cfg->exceptions & EX_DMA_BUS_ERROR) + tmp_reg |= DMA_MODE_BER; + if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) | + (cfg->exceptions & EX_DMA_READ_ECC) | + (cfg->exceptions & EX_DMA_FM_WRITE_ECC)) + tmp_reg |= DMA_MODE_ECC; + if (cfg->dma_axi_dbg_num_of_beats) + tmp_reg |= (DMA_MODE_AXI_DBG_MASK & + ((cfg->dma_axi_dbg_num_of_beats - 1) + << DMA_MODE_AXI_DBG_SHIFT)); + + tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) & + DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT; + tmp_reg |= DMA_MODE_SECURE_PROT; + tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT; + tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT; + + iowrite32be(tmp_reg, &dma_rg->fmdmmr); + + /* configure thresholds register */ + tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer << + DMA_THRESH_COMMQ_SHIFT); + tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer & + DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT; + tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer & + DMA_THRESH_WRITE_INT_BUF_MASK; + + iowrite32be(tmp_reg, &dma_rg->fmdmtr); + + /* configure hysteresis register */ + tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer << + DMA_THRESH_COMMQ_SHIFT); + tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer & + DMA_THRESH_READ_INT_BUF_MASK) << 
DMA_THRESH_READ_INT_BUF_SHIFT; + tmp_reg |= cfg->dma_write_buf_tsh_clr_emer & + DMA_THRESH_WRITE_INT_BUF_MASK; + + iowrite32be(tmp_reg, &dma_rg->fmdmhy); + + /* configure emergency threshold */ + iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr); + + /* configure Watchdog */ + iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr); + + iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr); + + /* Allocate MURAM for CAM */ + fman->cam_size = + (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY); + fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", + __func__); + return -ENOMEM; + } + + if (fman->state->rev_info.major == 2) { + u32 __iomem *cam_base_addr; + + fman_muram_free_mem(fman->muram, fman->cam_offset, + fman->cam_size); + + fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128; + fman->cam_offset = fman_muram_alloc(fman->muram, + fman->cam_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n", + __func__); + return -ENOMEM; + } + + if (fman->cfg->dma_cam_num_of_entries % 8 || + fman->cfg->dma_cam_num_of_entries > 32) { + dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n", + __func__); + return -EINVAL; + } + + cam_base_addr = (u32 __iomem *) + fman_muram_offset_to_vbase(fman->muram, + fman->cam_offset); + iowrite32be(~((1 << + (32 - fman->cfg->dma_cam_num_of_entries)) - 1), + cam_base_addr); + } + + fman->cfg->cam_base_addr = fman->cam_offset; + + return 0; +} + +static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg) +{ + u32 tmp_reg; + int i; + + /* Init FPM Registers */ + + tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT); + iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd); + + tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) | + ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) | + ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) | + ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT)); + iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1); + + tmp_reg = + (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) | + ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) | + ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) | + ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT)); + iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2); + + /* define exceptions and error behavior */ + tmp_reg = 0; + /* Clear events */ + tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC | + FPM_EV_MASK_SINGLE_ECC); + /* enable interrupts */ + if (cfg->exceptions & EX_FPM_STALL_ON_TASKS) + tmp_reg |= FPM_EV_MASK_STALL_EN; + if (cfg->exceptions & EX_FPM_SINGLE_ECC) + tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN; + if (cfg->exceptions & EX_FPM_DOUBLE_ECC) + tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN; + tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT); + tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT); + /* FMan is not halted upon external halt activation */ + tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT; + /* Man is not halted upon Unrecoverable ECC error behavior */ + tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT; + iowrite32be(tmp_reg, &fpm_rg->fmfp_ee); + + /* clear all fmCtls event registers */ + for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++) + iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]); + + /* RAM ECC - enable and clear events */ + /* first we need to clear all parser memory, + * as it is uninitialized and may cause ECC errors + */ + /* event bits */ + tmp_reg = 
(FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC); + + iowrite32be(tmp_reg, &fpm_rg->fm_rcr); + + tmp_reg = 0; + if (cfg->exceptions & EX_IRAM_ECC) { + tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN; + enable_rams_ecc(fpm_rg); + } + if (cfg->exceptions & EX_MURAM_ECC) { + tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN; + enable_rams_ecc(fpm_rg); + } + iowrite32be(tmp_reg, &fpm_rg->fm_rie); +} + +static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg, + struct fman_cfg *cfg) +{ + u32 tmp_reg; + + /* Init BMI Registers */ + + /* define common resources */ + tmp_reg = cfg->fifo_base_addr; + tmp_reg = tmp_reg / BMI_FIFO_ALIGN; + + tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) << + BMI_CFG1_FIFO_SIZE_SHIFT); + iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1); + + tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) << + BMI_CFG2_TASKS_SHIFT; + /* num of DMA's will be dynamically updated when each port is set */ + iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2); + + /* define unmaskable exceptions, enable and clear events */ + tmp_reg = 0; + iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC | + BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC | + BMI_ERR_INTR_EN_STATISTICS_RAM_ECC | + BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr); + + if (cfg->exceptions & EX_BMI_LIST_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC) + tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC) + tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + iowrite32be(tmp_reg, &bmi_rg->fmbm_ier); +} + +static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg, + struct fman_cfg *cfg) +{ + u32 tmp_reg; + + /* Init QMI Registers */ + + /* Clear error interrupt events */ + + iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF, + &qmi_rg->fmqm_eie); + tmp_reg = 0; + if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID) + tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + if (cfg->exceptions & EX_QMI_DOUBLE_ECC) + tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC; + /* enable events */ + iowrite32be(tmp_reg, &qmi_rg->fmqm_eien); + + tmp_reg = 0; + /* Clear interrupt events */ + iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie); + if (cfg->exceptions & EX_QMI_SINGLE_ECC) + tmp_reg |= QMI_INTR_EN_SINGLE_ECC; + /* enable events */ + iowrite32be(tmp_reg, &qmi_rg->fmqm_ien); +} + +static int enable(struct fman *fman, struct fman_cfg *cfg) +{ + u32 cfg_reg = 0; + + /* Enable all modules */ + + /* clear&enable global counters - calculate reg and save for later, + * because it's the same reg for QMI enable + */ + cfg_reg = QMI_CFG_EN_COUNTERS; + + /* Set enqueue and dequeue thresholds */ + cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh; + + iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init); + iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN, + &fman->qmi_regs->fmqm_gc); + + return 0; +} + +static int set_exception(struct fman *fman, + enum fman_exceptions exception, bool enable) +{ + u32 tmp; + + switch (exception) { + case FMAN_EX_DMA_BUS_ERROR: + tmp = ioread32be(&fman->dma_regs->fmdmmr); + if (enable) + tmp |= DMA_MODE_BER; + else + tmp &= ~DMA_MODE_BER; + /* disable bus error */ + iowrite32be(tmp, &fman->dma_regs->fmdmmr); + break; + case FMAN_EX_DMA_READ_ECC: + case FMAN_EX_DMA_SYSTEM_WRITE_ECC: + case FMAN_EX_DMA_FM_WRITE_ECC: + tmp = ioread32be(&fman->dma_regs->fmdmmr); + if (enable) + tmp |= DMA_MODE_ECC; + else + tmp &= ~DMA_MODE_ECC; + 
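/* FMAN_EX_DMA_READ_ECC, _SYSTEM_WRITE_ECC and _FM_WRITE_ECC all share the
 * single DMA_MODE_ECC enable bit (as in dma_init() above), so disabling any
 * one of these exceptions turns off ECC error reporting for all three.
 */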
iowrite32be(tmp, &fman->dma_regs->fmdmmr); + break; + case FMAN_EX_FPM_STALL_ON_TASKS: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_STALL_EN; + else + tmp &= ~FPM_EV_MASK_STALL_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_FPM_SINGLE_ECC: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_SINGLE_ECC_EN; + else + tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_FPM_DOUBLE_ECC: + tmp = ioread32be(&fman->fpm_regs->fmfp_ee); + if (enable) + tmp |= FPM_EV_MASK_DOUBLE_ECC_EN; + else + tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN; + iowrite32be(tmp, &fman->fpm_regs->fmfp_ee); + break; + case FMAN_EX_QMI_SINGLE_ECC: + tmp = ioread32be(&fman->qmi_regs->fmqm_ien); + if (enable) + tmp |= QMI_INTR_EN_SINGLE_ECC; + else + tmp &= ~QMI_INTR_EN_SINGLE_ECC; + iowrite32be(tmp, &fman->qmi_regs->fmqm_ien); + break; + case FMAN_EX_QMI_DOUBLE_ECC: + tmp = ioread32be(&fman->qmi_regs->fmqm_eien); + if (enable) + tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC; + else + tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC; + iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); + break; + case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: + tmp = ioread32be(&fman->qmi_regs->fmqm_eien); + if (enable) + tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + else + tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF; + iowrite32be(tmp, &fman->qmi_regs->fmqm_eien); + break; + case FMAN_EX_BMI_LIST_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_STORAGE_PROFILE_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_STATISTICS_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_BMI_DISPATCH_RAM_ECC: + tmp = ioread32be(&fman->bmi_regs->fmbm_ier); + if (enable) + tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + else + tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + iowrite32be(tmp, &fman->bmi_regs->fmbm_ier); + break; + case FMAN_EX_IRAM_ECC: + tmp = ioread32be(&fman->fpm_regs->fm_rie); + if (enable) { + /* enable ECC if not enabled */ + enable_rams_ecc(fman->fpm_regs); + /* enable ECC interrupts */ + tmp |= FPM_IRAM_ECC_ERR_EX_EN; + } else { + /* ECC mechanism may be disabled, + * depending on driver status + */ + disable_rams_ecc(fman->fpm_regs); + tmp &= ~FPM_IRAM_ECC_ERR_EX_EN; + } + iowrite32be(tmp, &fman->fpm_regs->fm_rie); + break; + case FMAN_EX_MURAM_ECC: + tmp = ioread32be(&fman->fpm_regs->fm_rie); + if (enable) { + /* enable ECC if not enabled */ + enable_rams_ecc(fman->fpm_regs); + /* enable ECC interrupts */ + tmp |= FPM_MURAM_ECC_ERR_EX_EN; + } else { + /* ECC mechanism may be disabled, + * depending on driver status + */ + disable_rams_ecc(fman->fpm_regs); + tmp &= ~FPM_MURAM_ECC_ERR_EX_EN; + } + iowrite32be(tmp, &fman->fpm_regs->fm_rie); + break; + default: + return -EINVAL; + } + return 0; +} + +static void resume(struct fman_fpm_regs __iomem *fpm_rg) +{ + u32 tmp; + + tmp = ioread32be(&fpm_rg->fmfp_ee); + /* clear tmp_reg event bits in order not to clear standing events */ + tmp &= ~(FPM_EV_MASK_DOUBLE_ECC | + 
FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC); + tmp |= FPM_EV_MASK_RELEASE_FM; + + iowrite32be(tmp, &fpm_rg->fmfp_ee); +} + +static int fill_soc_specific_params(struct fman_state_struct *state) +{ + u8 minor = state->rev_info.minor; + /* P4080 - Major 2 + * P2041/P3041/P5020/P5040 - Major 3 + * Tx/Bx - Major 6 + */ + switch (state->rev_info.major) { + case 3: + state->bmi_max_fifo_size = 160 * 1024; + state->fm_iram_size = 64 * 1024; + state->dma_thresh_max_commq = 31; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 48; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 32; + state->fm_port_num_of_cg = 256; + state->num_of_rx_ports = 6; + state->total_fifo_size = 122 * 1024; + break; + + case 2: + state->bmi_max_fifo_size = 160 * 1024; + state->fm_iram_size = 64 * 1024; + state->dma_thresh_max_commq = 31; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 48; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 32; + state->fm_port_num_of_cg = 256; + state->num_of_rx_ports = 5; + state->total_fifo_size = 100 * 1024; + break; + + case 6: + state->dma_thresh_max_commq = 83; + state->dma_thresh_max_buf = 127; + state->qmi_max_num_of_tnums = 64; + state->qmi_def_tnums_thresh = 32; + state->fm_port_num_of_cg = 256; + + /* FManV3L */ + if (minor == 1 || minor == 4) { + state->bmi_max_fifo_size = 192 * 1024; + state->bmi_max_num_of_tasks = 64; + state->max_num_of_open_dmas = 32; + state->num_of_rx_ports = 5; + if (minor == 1) + state->fm_iram_size = 32 * 1024; + else + state->fm_iram_size = 64 * 1024; + state->total_fifo_size = 156 * 1024; + } + /* FManV3H */ + else if (minor == 0 || minor == 2 || minor == 3) { + state->bmi_max_fifo_size = 384 * 1024; + state->fm_iram_size = 64 * 1024; + state->bmi_max_num_of_tasks = 128; + state->max_num_of_open_dmas = 84; + state->num_of_rx_ports = 8; + state->total_fifo_size = 295 * 1024; + } else { + pr_err("Unsupported FManv3 version\n"); + return -EINVAL; + } + + break; + default: + pr_err("Unsupported FMan version\n"); + return -EINVAL; + } + + return 0; +} + +static bool is_init_done(struct fman_cfg *cfg) +{ + /* Checks if FMan driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +static void free_init_resources(struct fman *fman) +{ + if (fman->cam_offset) + fman_muram_free_mem(fman->muram, fman->cam_offset, + fman->cam_size); + if (fman->fifo_offset) + fman_muram_free_mem(fman->muram, fman->fifo_offset, + fman->fifo_size); +} + +static irqreturn_t bmi_err_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&bmi_rg->fmbm_ievr); + mask = ioread32be(&bmi_rg->fmbm_ier); + event &= mask; + /* clear the forced events */ + force = ioread32be(&bmi_rg->fmbm_ifr); + if (force & event) + iowrite32be(force & ~event, &bmi_rg->fmbm_ifr); + /* clear the acknowledged events */ + iowrite32be(event, &bmi_rg->fmbm_ievr); + + if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC); + if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC); + if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC); + if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC) + ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC); + + return ret; +} + 
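
bmi_err_event() above and the two QMI handlers that follow share one event-register idiom: read the event register, keep only the bits allowed by the enable register, drop any matching bits from the force register (software-injected events), acknowledge what was taken, then dispatch it to the exception callback. A condensed sketch of that idiom under assumed names; example_err_event() and its dispatch parameter are illustrative, not part of the driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

static irqreturn_t example_err_event(void __iomem *event_reg,
                                     void __iomem *enable_reg,
                                     void __iomem *force_reg,
                                     irqreturn_t (*dispatch)(u32 events, void *ctx),
                                     void *ctx)
{
        u32 event, mask, force;

        event = ioread32be(event_reg);
        mask = ioread32be(enable_reg);
        event &= mask;                  /* honour the interrupt-enable mask */

        /* clear force bits for the events being taken now */
        force = ioread32be(force_reg);
        if (force & event)
                iowrite32be(force & ~event, force_reg);

        /* acknowledge the handled events */
        iowrite32be(event, event_reg);

        return event ? dispatch(event, ctx) : IRQ_NONE;
}
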
+static irqreturn_t qmi_err_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&qmi_rg->fmqm_eie); + mask = ioread32be(&qmi_rg->fmqm_eien); + event &= mask; + + /* clear the forced events */ + force = ioread32be(&qmi_rg->fmqm_eif); + if (force & event) + iowrite32be(force & ~event, &qmi_rg->fmqm_eif); + /* clear the acknowledged events */ + iowrite32be(event, &qmi_rg->fmqm_eie); + + if (event & QMI_ERR_INTR_EN_DOUBLE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC); + if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF) + ret = fman->exception_cb(fman, + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID); + + return ret; +} + +static irqreturn_t dma_err_event(struct fman *fman) +{ + u32 status, mask, com_id; + u8 tnum, port_id, relative_port_id; + u16 liodn; + struct fman_dma_regs __iomem *dma_rg = fman->dma_regs; + irqreturn_t ret = IRQ_NONE; + + status = ioread32be(&dma_rg->fmdmsr); + mask = ioread32be(&dma_rg->fmdmmr); + + /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */ + if ((mask & DMA_MODE_BER) != DMA_MODE_BER) + status &= ~DMA_STATUS_BUS_ERR; + + /* clear relevant bits if mask has no DMA_MODE_ECC */ + if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC) + status &= ~(DMA_STATUS_FM_SPDAT_ECC | + DMA_STATUS_READ_ECC | + DMA_STATUS_SYSTEM_WRITE_ECC | + DMA_STATUS_FM_WRITE_ECC); + + /* clear set events */ + iowrite32be(status, &dma_rg->fmdmsr); + + if (status & DMA_STATUS_BUS_ERR) { + u64 addr; + + addr = (u64)ioread32be(&dma_rg->fmdmtal); + addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32); + + com_id = ioread32be(&dma_rg->fmdmtcid); + port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >> + DMA_TRANSFER_PORTID_SHIFT)); + relative_port_id = + hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); + tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >> + DMA_TRANSFER_TNUM_SHIFT); + liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK); + ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum, + liodn); + } + if (status & DMA_STATUS_FM_SPDAT_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC); + if (status & DMA_STATUS_READ_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC); + if (status & DMA_STATUS_SYSTEM_WRITE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC); + if (status & DMA_STATUS_FM_WRITE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC); + + return ret; +} + +static irqreturn_t fpm_err_event(struct fman *fman) +{ + u32 event; + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&fpm_rg->fmfp_ee); + /* clear the all occurred events */ + iowrite32be(event, &fpm_rg->fmfp_ee); + + if ((event & FPM_EV_MASK_DOUBLE_ECC) && + (event & FPM_EV_MASK_DOUBLE_ECC_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC); + if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS); + if ((event & FPM_EV_MASK_SINGLE_ECC) && + (event & FPM_EV_MASK_SINGLE_ECC_EN)) + ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC); + + return ret; +} + +static irqreturn_t muram_err_intr(struct fman *fman) +{ + u32 event, mask; + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&fpm_rg->fm_rcr); + mask = ioread32be(&fpm_rg->fm_rie); + + /* clear MURAM event bit (do not clear IRAM event) */ + iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr); + + if ((mask 
& FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC)) + ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC); + + return ret; +} + +static irqreturn_t qmi_event(struct fman *fman) +{ + u32 event, mask, force; + struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs; + irqreturn_t ret = IRQ_NONE; + + event = ioread32be(&qmi_rg->fmqm_ie); + mask = ioread32be(&qmi_rg->fmqm_ien); + event &= mask; + /* clear the forced events */ + force = ioread32be(&qmi_rg->fmqm_if); + if (force & event) + iowrite32be(force & ~event, &qmi_rg->fmqm_if); + /* clear the acknowledged events */ + iowrite32be(event, &qmi_rg->fmqm_ie); + + if (event & QMI_INTR_EN_SINGLE_ECC) + ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC); + + return ret; +} + +static void enable_time_stamp(struct fman *fman) +{ + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u16 fm_clk_freq = fman->state->fm_clk_freq; + u32 tmp, intgr, ts_freq; + u64 frac; + + ts_freq = (u32)(1 << fman->state->count1_micro_bit); + /* configure timestamp so that bit 8 will count 1 microsecond + * Find effective count rate at TIMESTAMP least significant bits: + * Effective_Count_Rate = 1MHz x 2^8 = 256MHz + * Find frequency ratio between effective count rate and the clock: + * Effective_Count_Rate / CLK e.g. for 600 MHz clock: + * 256/600 = 0.4266666... + */ + + intgr = ts_freq / fm_clk_freq; + /* we multiply by 2^16 to keep the fraction of the division + * we do not div back, since we write this value as a fraction + * see spec + */ + + frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq; + /* we check remainder of the division in order to round up if not int */ + if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq) + frac++; + + tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac; + iowrite32be(tmp, &fpm_rg->fmfp_tsc2); + + /* enable timestamp with original clock */ + iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1); + fman->state->enabled_time_stamp = true; +} + +static int clear_iram(struct fman *fman) +{ + struct fman_iram_regs __iomem *iram; + int i, count; + + iram = fman->base_addr + IMEM_OFFSET; + + /* Enable the auto-increment */ + iowrite32be(IRAM_IADD_AIE, &iram->iadd); + count = 100; + do { + udelay(1); + } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count); + if (count == 0) + return -EBUSY; + + for (i = 0; i < (fman->state->fm_iram_size / 4); i++) + iowrite32be(0xffffffff, &iram->idata); + + iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd); + count = 100; + do { + udelay(1); + } while ((ioread32be(&iram->idata) != 0xffffffff) && --count); + if (count == 0) + return -EBUSY; + + return 0; +} + +static u32 get_exception_flag(enum fman_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FMAN_EX_DMA_BUS_ERROR: + bit_mask = EX_DMA_BUS_ERROR; + break; + case FMAN_EX_DMA_SINGLE_PORT_ECC: + bit_mask = EX_DMA_SINGLE_PORT_ECC; + break; + case FMAN_EX_DMA_READ_ECC: + bit_mask = EX_DMA_READ_ECC; + break; + case FMAN_EX_DMA_SYSTEM_WRITE_ECC: + bit_mask = EX_DMA_SYSTEM_WRITE_ECC; + break; + case FMAN_EX_DMA_FM_WRITE_ECC: + bit_mask = EX_DMA_FM_WRITE_ECC; + break; + case FMAN_EX_FPM_STALL_ON_TASKS: + bit_mask = EX_FPM_STALL_ON_TASKS; + break; + case FMAN_EX_FPM_SINGLE_ECC: + bit_mask = EX_FPM_SINGLE_ECC; + break; + case FMAN_EX_FPM_DOUBLE_ECC: + bit_mask = EX_FPM_DOUBLE_ECC; + break; + case FMAN_EX_QMI_SINGLE_ECC: + bit_mask = EX_QMI_SINGLE_ECC; + break; + case FMAN_EX_QMI_DOUBLE_ECC: + bit_mask = EX_QMI_DOUBLE_ECC; + break; + case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: + bit_mask = 
EX_QMI_DEQ_FROM_UNKNOWN_PORTID; + break; + case FMAN_EX_BMI_LIST_RAM_ECC: + bit_mask = EX_BMI_LIST_RAM_ECC; + break; + case FMAN_EX_BMI_STORAGE_PROFILE_ECC: + bit_mask = EX_BMI_STORAGE_PROFILE_ECC; + break; + case FMAN_EX_BMI_STATISTICS_RAM_ECC: + bit_mask = EX_BMI_STATISTICS_RAM_ECC; + break; + case FMAN_EX_BMI_DISPATCH_RAM_ECC: + bit_mask = EX_BMI_DISPATCH_RAM_ECC; + break; + case FMAN_EX_MURAM_ECC: + bit_mask = EX_MURAM_ECC; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static int get_module_event(enum fman_event_modules module, u8 mod_id, + enum fman_intr_type intr_type) +{ + int event; + + switch (module) { + case FMAN_MOD_MAC: + if (intr_type == FMAN_INTR_TYPE_ERR) + event = FMAN_EV_ERR_MAC0 + mod_id; + else + event = FMAN_EV_MAC0 + mod_id; + break; + case FMAN_MOD_FMAN_CTRL: + if (intr_type == FMAN_INTR_TYPE_ERR) + event = FMAN_EV_CNT; + else + event = (FMAN_EV_FMAN_CTRL_0 + mod_id); + break; + case FMAN_MOD_DUMMY_LAST: + event = FMAN_EV_CNT; + break; + default: + event = FMAN_EV_CNT; + break; + } + + return event; +} + +static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo, + u32 *extra_size_of_fifo) +{ + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u32 fifo = *size_of_fifo; + u32 extra_fifo = *extra_size_of_fifo; + u32 tmp; + + /* if this is the first time a port requires extra_fifo_pool_size, + * the total extra_fifo_pool_size must be initialized to 1 buffer per + * port + */ + if (extra_fifo && !fman->state->extra_fifo_pool_size) + fman->state->extra_fifo_pool_size = + fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS; + + fman->state->extra_fifo_pool_size = + max(fman->state->extra_fifo_pool_size, extra_fifo); + + /* check that there are enough uncommitted fifo size */ + if ((fman->state->accumulated_fifo_size + fifo) > + (fman->state->total_fifo_size - + fman->state->extra_fifo_pool_size)) { + dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n", + __func__); + return -EAGAIN; + } + + /* Read, modify and write to HW */ + tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) | + ((extra_fifo / FMAN_BMI_FIFO_UNITS) << + BMI_EXTRA_FIFO_SIZE_SHIFT); + iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]); + + /* update accumulated */ + fman->state->accumulated_fifo_size += fifo; + + return 0; +} + +static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks, + u8 *num_of_extra_tasks) +{ + struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u8 tasks = *num_of_tasks; + u8 extra_tasks = *num_of_extra_tasks; + u32 tmp; + + if (extra_tasks) + fman->state->extra_tasks_pool_size = + max(fman->state->extra_tasks_pool_size, extra_tasks); + + /* check that there are enough uncommitted tasks */ + if ((fman->state->accumulated_num_of_tasks + tasks) > + (fman->state->total_num_of_tasks - + fman->state->extra_tasks_pool_size)) { + dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n", + __func__, fman->state->fm_id); + return -EAGAIN; + } + /* update accumulated */ + fman->state->accumulated_num_of_tasks += tasks; + + /* Write to HW */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & + ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK); + tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) | + (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT)); + iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); + + return 0; +} + +static int set_num_of_open_dmas(struct fman *fman, u8 port_id, + u8 *num_of_open_dmas, + u8 *num_of_extra_open_dmas) +{ + 
struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs; + u8 open_dmas = *num_of_open_dmas; + u8 extra_open_dmas = *num_of_extra_open_dmas; + u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0; + u32 tmp; + + if (!open_dmas) { + /* Configuration according to values in the HW. + * read the current number of open Dma's + */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); + current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >> + BMI_EXTRA_NUM_OF_DMAS_SHIFT); + + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]); + current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >> + BMI_NUM_OF_DMAS_SHIFT) + 1); + + /* This is the first configuration and user did not + * specify value (!open_dmas), reset values will be used + * and we just save these values for resource management + */ + fman->state->extra_open_dmas_pool_size = + (u8)max(fman->state->extra_open_dmas_pool_size, + current_extra_val); + fman->state->accumulated_num_of_open_dmas += current_val; + *num_of_open_dmas = current_val; + *num_of_extra_open_dmas = current_extra_val; + return 0; + } + + if (extra_open_dmas > current_extra_val) + fman->state->extra_open_dmas_pool_size = + (u8)max(fman->state->extra_open_dmas_pool_size, + extra_open_dmas); + + if ((fman->state->rev_info.major < 6) && + (fman->state->accumulated_num_of_open_dmas - current_val + + open_dmas > fman->state->max_num_of_open_dmas)) { + dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n", + __func__, fman->state->fm_id); + return -EAGAIN; + } else if ((fman->state->rev_info.major >= 6) && + !((fman->state->rev_info.major == 6) && + (fman->state->rev_info.minor == 0)) && + (fman->state->accumulated_num_of_open_dmas - + current_val + open_dmas > + fman->state->dma_thresh_max_commq + 1)) { + dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n", + __func__, fman->state->fm_id, + fman->state->dma_thresh_max_commq + 1); + return -EAGAIN; + } + + WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val); + /* update acummulated */ + fman->state->accumulated_num_of_open_dmas -= current_val; + fman->state->accumulated_num_of_open_dmas += open_dmas; + + if (fman->state->rev_info.major < 6) + total_num_dmas = + (u8)(fman->state->accumulated_num_of_open_dmas + + fman->state->extra_open_dmas_pool_size); + + /* calculate reg */ + tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) & + ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK); + tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) | + (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT)); + iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]); + + /* update total num of DMA's with committed number of open DMAS, + * and max uncommitted pool. 
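+	 * For example, on FManV2 (rev < 6), with 24 open DMAs committed
+	 * across the ports and an extra pool of 8, total_num_dmas is 32 and
+	 * the zero-based field below is programmed with 31.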
+ */ + if (total_num_dmas) { + tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK; + tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT; + iowrite32be(tmp, &bmi_rg->fmbm_cfg2); + } + + return 0; +} + +static int fman_config(struct fman *fman) +{ + void __iomem *base_addr; + int err; + + base_addr = fman->dts_params.base_addr; + + fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL); + if (!fman->state) + goto err_fm_state; + + /* Allocate the FM driver's parameters structure */ + fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL); + if (!fman->cfg) + goto err_fm_drv; + + /* Initialize MURAM block */ + fman->muram = + fman_muram_init(fman->dts_params.muram_res.start, + resource_size(&fman->dts_params.muram_res)); + if (!fman->muram) + goto err_fm_soc_specific; + + /* Initialize FM parameters which will be kept by the driver */ + fman->state->fm_id = fman->dts_params.id; + fman->state->fm_clk_freq = fman->dts_params.clk_freq; + fman->state->qman_channel_base = fman->dts_params.qman_channel_base; + fman->state->num_of_qman_channels = + fman->dts_params.num_of_qman_channels; + fman->state->res = fman->dts_params.res; + fman->exception_cb = fman_exceptions; + fman->bus_error_cb = fman_bus_error; + fman->fpm_regs = base_addr + FPM_OFFSET; + fman->bmi_regs = base_addr + BMI_OFFSET; + fman->qmi_regs = base_addr + QMI_OFFSET; + fman->dma_regs = base_addr + DMA_OFFSET; + fman->base_addr = base_addr; + + spin_lock_init(&fman->spinlock); + fman_defconfig(fman->cfg); + + fman->state->extra_fifo_pool_size = 0; + fman->state->exceptions = (EX_DMA_BUS_ERROR | + EX_DMA_READ_ECC | + EX_DMA_SYSTEM_WRITE_ECC | + EX_DMA_FM_WRITE_ECC | + EX_FPM_STALL_ON_TASKS | + EX_FPM_SINGLE_ECC | + EX_FPM_DOUBLE_ECC | + EX_QMI_DEQ_FROM_UNKNOWN_PORTID | + EX_BMI_LIST_RAM_ECC | + EX_BMI_STORAGE_PROFILE_ECC | + EX_BMI_STATISTICS_RAM_ECC | + EX_MURAM_ECC | + EX_BMI_DISPATCH_RAM_ECC | + EX_QMI_DOUBLE_ECC | + EX_QMI_SINGLE_ECC); + + /* Read FMan revision for future use*/ + fman_get_revision(fman, &fman->state->rev_info); + + err = fill_soc_specific_params(fman->state); + if (err) + goto err_fm_soc_specific; + + /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */ + if (fman->state->rev_info.major >= 6) + fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID; + + fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh; + + fman->state->total_num_of_tasks = + (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major, + fman->state->rev_info.minor, + fman->state->bmi_max_num_of_tasks); + + if (fman->state->rev_info.major < 6) { + fman->cfg->dma_comm_qtsh_clr_emer = + (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major, + fman->state->dma_thresh_max_commq); + + fman->cfg->dma_comm_qtsh_asrt_emer = + (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major, + fman->state->dma_thresh_max_commq); + + fman->cfg->dma_cam_num_of_entries = + DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major); + + fman->cfg->dma_read_buf_tsh_clr_emer = + DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_read_buf_tsh_asrt_emer = + DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_write_buf_tsh_clr_emer = + DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_write_buf_tsh_asrt_emer = + DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf); + + fman->cfg->dma_axi_dbg_num_of_beats = + DFLT_AXI_DBG_NUM_OF_BEATS; + } + + return 0; + +err_fm_soc_specific: + kfree(fman->cfg); +err_fm_drv: + kfree(fman->state); +err_fm_state: + kfree(fman); + return 
-EINVAL; +} + +static int fman_init(struct fman *fman) +{ + struct fman_cfg *cfg = NULL; + int err = 0, i, count; + + if (is_init_done(fman->cfg)) + return -EINVAL; + + fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT; + + cfg = fman->cfg; + + /* clear revision-dependent non existing exception */ + if (fman->state->rev_info.major < 6) + fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC; + + if (fman->state->rev_info.major >= 6) + fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC; + + /* clear CPG */ + memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0, + fman->state->fm_port_num_of_cg); + + /* Save LIODN info before FMan reset + * Skipping non-existent port 0 (i = 1) + */ + for (i = 1; i < FMAN_LIODN_TBL; i++) { + u32 liodn_base; + + fman->liodn_offset[i] = + ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]); + liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]); + if (i % 2) { + /* FMDM_PLR LSB holds LIODN base for odd ports */ + liodn_base &= DMA_LIODN_BASE_MASK; + } else { + /* FMDM_PLR MSB holds LIODN base for even ports */ + liodn_base >>= DMA_LIODN_SHIFT; + liodn_base &= DMA_LIODN_BASE_MASK; + } + fman->liodn_base[i] = liodn_base; + } + + /* FMan Reset (supported only for FMan V2) */ + if (fman->state->rev_info.major >= 6) { + /* Errata A007273 */ + dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n", + __func__); + } else { + iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc); + /* Wait for reset completion */ + count = 100; + do { + udelay(1); + } while (((ioread32be(&fman->fpm_regs->fm_rstc)) & + FPM_RSTC_FM_RESET) && --count); + if (count == 0) + return -EBUSY; + } + + if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) { + resume(fman->fpm_regs); + /* Wait until QMI is not in halt not busy state */ + count = 100; + do { + udelay(1); + } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) & + QMI_GS_HALT_NOT_BUSY) && --count); + if (count == 0) + dev_warn(fman->dev, "%s: QMI is in halt not busy state\n", + __func__); + } + + if (clear_iram(fman) != 0) + return -EINVAL; + + cfg->exceptions = fman->state->exceptions; + + /* Init DMA Registers */ + + err = dma_init(fman); + if (err != 0) { + free_init_resources(fman); + return err; + } + + /* Init FPM Registers */ + fpm_init(fman->fpm_regs, fman->cfg); + + /* define common resources */ + /* allocate MURAM for FIFO according to total size */ + fman->fifo_offset = fman_muram_alloc(fman->muram, + fman->state->total_fifo_size); + if (IS_ERR_VALUE(fman->cam_offset)) { + free_init_resources(fman); + dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n", + __func__); + return -ENOMEM; + } + + cfg->fifo_base_addr = fman->fifo_offset; + cfg->total_fifo_size = fman->state->total_fifo_size; + cfg->total_num_of_tasks = fman->state->total_num_of_tasks; + cfg->clk_freq = fman->state->fm_clk_freq; + + /* Init BMI Registers */ + bmi_init(fman->bmi_regs, fman->cfg); + + /* Init QMI Registers */ + qmi_init(fman->qmi_regs, fman->cfg); + + err = enable(fman, cfg); + if (err != 0) + return err; + + enable_time_stamp(fman); + + kfree(fman->cfg); + fman->cfg = NULL; + + return 0; +} + +static int fman_set_exception(struct fman *fman, + enum fman_exceptions exception, bool enable) +{ + u32 bit_mask = 0; + + if (!is_init_done(fman->cfg)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + fman->state->exceptions |= bit_mask; + else + fman->state->exceptions &= ~bit_mask; + } else { + dev_err(fman->dev, "%s: Undefined exception (%d)\n", + __func__, exception); 
+ return -EINVAL; + } + + return set_exception(fman, exception, enable); +} + +/** + * fman_register_intr + * @fman: A Pointer to FMan device + * @mod: Calling module + * @mod_id: Module id (if more than 1 exists, '0' if not) + * @intr_type: Interrupt type (error/normal) selection. + * @f_isr: The interrupt service routine. + * @h_src_arg: Argument to be passed to f_isr. + * + * Used to register an event handler to be processed by FMan + * + * Return: 0 on success; Error code otherwise. + */ +void fman_register_intr(struct fman *fman, enum fman_event_modules module, + u8 mod_id, enum fman_intr_type intr_type, + void (*isr_cb)(void *src_arg), void *src_arg) +{ + int event = 0; + + event = get_module_event(module, mod_id, intr_type); + WARN_ON(event >= FMAN_EV_CNT); + + /* register in local FM structure */ + fman->intr_mng[event].isr_cb = isr_cb; + fman->intr_mng[event].src_handle = src_arg; +} + +/** + * fman_unregister_intr + * @fman: A Pointer to FMan device + * @mod: Calling module + * @mod_id: Module id (if more than 1 exists, '0' if not) + * @intr_type: Interrupt type (error/normal) selection. + * + * Used to unregister an event handler to be processed by FMan + * + * Return: 0 on success; Error code otherwise. + */ +void fman_unregister_intr(struct fman *fman, enum fman_event_modules module, + u8 mod_id, enum fman_intr_type intr_type) +{ + int event = 0; + + event = get_module_event(module, mod_id, intr_type); + WARN_ON(event >= FMAN_EV_CNT); + + fman->intr_mng[event].isr_cb = NULL; + fman->intr_mng[event].src_handle = NULL; +} + +/** + * fman_set_port_params + * @fman: A Pointer to FMan device + * @port_params: Port parameters + * + * Used by FMan Port to pass parameters to the FMan + * + * Return: 0 on success; Error code otherwise. + */ +int fman_set_port_params(struct fman *fman, + struct fman_port_init_params *port_params) +{ + int err; + unsigned long flags; + u8 port_id = port_params->port_id, mac_id; + + spin_lock_irqsave(&fman->spinlock, flags); + + err = set_num_of_tasks(fman, port_params->port_id, + &port_params->num_of_tasks, + &port_params->num_of_extra_tasks); + if (err) + goto return_err; + + /* TX Ports */ + if (port_params->port_type != FMAN_PORT_TYPE_RX) { + u32 enq_th, deq_th, reg; + + /* update qmi ENQ/DEQ threshold */ + fman->state->accumulated_num_of_deq_tnums += + port_params->deq_pipeline_depth; + enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) & + QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT; + /* if enq_th is too big, we reduce it to the max value + * that is still 0 + */ + if (enq_th >= (fman->state->qmi_max_num_of_tnums - + fman->state->accumulated_num_of_deq_tnums)) { + enq_th = + fman->state->qmi_max_num_of_tnums - + fman->state->accumulated_num_of_deq_tnums - 1; + + reg = ioread32be(&fman->qmi_regs->fmqm_gc); + reg &= ~QMI_CFG_ENQ_MASK; + reg |= (enq_th << QMI_CFG_ENQ_SHIFT); + iowrite32be(reg, &fman->qmi_regs->fmqm_gc); + } + + deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) & + QMI_CFG_DEQ_MASK; + /* if deq_th is too small, we enlarge it to the min + * value that is still 0. + * depTh may not be larger than 63 + * (fman->state->qmi_max_num_of_tnums-1). 
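+		 * For example, with qmi_max_num_of_tnums = 64 and 10 TNUMs
+		 * already reserved for dequeue, a deq_th of 10 or less is
+		 * raised to 11.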
+ */ + if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) && + (deq_th < fman->state->qmi_max_num_of_tnums - 1)) { + deq_th = fman->state->accumulated_num_of_deq_tnums + 1; + reg = ioread32be(&fman->qmi_regs->fmqm_gc); + reg &= ~QMI_CFG_DEQ_MASK; + reg |= deq_th; + iowrite32be(reg, &fman->qmi_regs->fmqm_gc); + } + } + + err = set_size_of_fifo(fman, port_params->port_id, + &port_params->size_of_fifo, + &port_params->extra_size_of_fifo); + if (err) + goto return_err; + + err = set_num_of_open_dmas(fman, port_params->port_id, + &port_params->num_of_open_dmas, + &port_params->num_of_extra_open_dmas); + if (err) + goto return_err; + + set_port_liodn(fman, port_id, fman->liodn_base[port_id], + fman->liodn_offset[port_id]); + + if (fman->state->rev_info.major < 6) + set_port_order_restoration(fman->fpm_regs, port_id); + + mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id); + + if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) { + fman->state->port_mfl[mac_id] = port_params->max_frame_length; + } else { + dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n", + __func__, port_id, mac_id); + err = -EINVAL; + goto return_err; + } + + spin_unlock_irqrestore(&fman->spinlock, flags); + + return 0; + +return_err: + spin_unlock_irqrestore(&fman->spinlock, flags); + return err; +} + +/** + * fman_reset_mac + * @fman: A Pointer to FMan device + * @mac_id: MAC id to be reset + * + * Reset a specific MAC + * + * Return: 0 on success; Error code otherwise. + */ +int fman_reset_mac(struct fman *fman, u8 mac_id) +{ + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u32 msk, timeout = 100; + + if (fman->state->rev_info.major >= 6) { + dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n", + __func__); + return -EINVAL; + } + + /* Get the relevant bit mask */ + switch (mac_id) { + case 0: + msk = FPM_RSTC_MAC0_RESET; + break; + case 1: + msk = FPM_RSTC_MAC1_RESET; + break; + case 2: + msk = FPM_RSTC_MAC2_RESET; + break; + case 3: + msk = FPM_RSTC_MAC3_RESET; + break; + case 4: + msk = FPM_RSTC_MAC4_RESET; + break; + case 5: + msk = FPM_RSTC_MAC5_RESET; + break; + case 6: + msk = FPM_RSTC_MAC6_RESET; + break; + case 7: + msk = FPM_RSTC_MAC7_RESET; + break; + case 8: + msk = FPM_RSTC_MAC8_RESET; + break; + case 9: + msk = FPM_RSTC_MAC9_RESET; + break; + default: + dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n", + __func__, mac_id); + return -EINVAL; + } + + /* reset */ + iowrite32be(msk, &fpm_rg->fm_rstc); + while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout) + udelay(10); + + if (!timeout) + return -EIO; + + return 0; +} + +/** + * fman_set_mac_max_frame + * @fman: A Pointer to FMan device + * @mac_id: MAC id + * @mfl: Maximum frame length + * + * Set maximum frame length of specific MAC in FMan driver + * + * Return: 0 on success; Error code otherwise. 
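+ *
+ * The value recorded here is checked again in fman_set_port_params(),
+ * which requires the port's max_frame_length to be at least the MAC's
+ * current MTU.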
+ */ +int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl) +{ + /* if port is already initialized, check that MaxFrameLength is smaller + * or equal to the port's max + */ + if ((!fman->state->port_mfl[mac_id]) || + (fman->state->port_mfl[mac_id] && + (mfl <= fman->state->port_mfl[mac_id]))) { + fman->state->mac_mfl[mac_id] = mfl; + } else { + dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n", + __func__); + return -EINVAL; + } + return 0; +} + +/** + * fman_get_clock_freq + * @fman: A Pointer to FMan device + * + * Get FMan clock frequency + * + * Return: FMan clock frequency + */ +u16 fman_get_clock_freq(struct fman *fman) +{ + return fman->state->fm_clk_freq; +} + +/** + * fman_get_bmi_max_fifo_size + * @fman: A Pointer to FMan device + * + * Get FMan maximum FIFO size + * + * Return: FMan Maximum FIFO size + */ +u32 fman_get_bmi_max_fifo_size(struct fman *fman) +{ + return fman->state->bmi_max_fifo_size; +} + +/** + * fman_get_revision + * @fman - Pointer to the FMan module + * @rev_info - A structure of revision information parameters. + * + * Returns the FM revision + * + * Allowed only following fman_init(). + * + * Return: 0 on success; Error code otherwise. + */ +void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info) +{ + u32 tmp; + + tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1); + rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >> + FPM_REV1_MAJOR_SHIFT); + rev_info->minor = tmp & FPM_REV1_MINOR_MASK; +} + +/** + * fman_get_qman_channel_id + * @fman: A Pointer to FMan device + * @port_id: Port id + * + * Get QMan channel ID associated to the Port id + * + * Return: QMan channel ID + */ +u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id) +{ + int i; + + if (fman->state->rev_info.major >= 6) { + u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b, + 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { + if (port_ids[i] == port_id) + break; + } + } else { + u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1, + 0x2, 0x3, 0x4, 0x5, 0x7, 0x7}; + for (i = 0; i < fman->state->num_of_qman_channels; i++) { + if (port_ids[i] == port_id) + break; + } + } + + if (i == fman->state->num_of_qman_channels) + return 0; + + return fman->state->qman_channel_base + i; +} + +/** + * fman_get_mem_region + * @fman: A Pointer to FMan device + * + * Get FMan memory region + * + * Return: A structure with FMan memory region information + */ +struct resource *fman_get_mem_region(struct fman *fman) +{ + return fman->state->res; +} + +/* Bootargs defines */ +/* Extra headroom for RX buffers - Default, min and max */ +#define FSL_FM_RX_EXTRA_HEADROOM 64 +#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16 +#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384 + +/* Maximum frame length */ +#define FSL_FM_MAX_FRAME_SIZE 1522 +#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600 +#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64 + +/* Extra headroom for Rx buffers. + * FMan is instructed to allocate, on the Rx path, this amount of + * space at the beginning of a data buffer, beside the DPA private + * data area and the IC fields. + * Does not impact Tx buffer layout. + * Configurable from bootargs. 64 by default, it's needed on + * particular forwarding scenarios that add extra headers to the + * forwarded frame. 
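+ * Values outside the 16-384 byte range are rejected on first use and the
+ * 64-byte default is restored; accepted values are rounded up to a
+ * multiple of 16 (e.g. a bootarg value of 90 becomes 96).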
+ */ +int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; +module_param(fsl_fm_rx_extra_headroom, int, 0); +MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers"); + +/* Max frame size, across all interfaces. + * Configurable from bootargs, to avoid allocating oversized (socket) + * buffers when not using jumbo frames. + * Must be large enough to accommodate the network MTU, but small enough + * to avoid wasting skb memory. + * + * Could be overridden once, at boot-time, via the + * fm_set_max_frm() callback. + */ +int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; +module_param(fsl_fm_max_frm, int, 0); +MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces"); + +/** + * fman_get_max_frm + * + * Return: Max frame length configured in the FM driver + */ +u16 fman_get_max_frm(void) +{ + static bool fm_check_mfl; + + if (!fm_check_mfl) { + if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE || + fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) { + pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", + fsl_fm_max_frm, + FSL_FM_MIN_POSSIBLE_FRAME_SIZE, + FSL_FM_MAX_POSSIBLE_FRAME_SIZE, + FSL_FM_MAX_FRAME_SIZE); + fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; + } + fm_check_mfl = true; + } + + return fsl_fm_max_frm; +} +EXPORT_SYMBOL(fman_get_max_frm); + +/** + * fman_get_rx_extra_headroom + * + * Return: Extra headroom size configured in the FM driver + */ +int fman_get_rx_extra_headroom(void) +{ + static bool fm_check_rx_extra_headroom; + + if (!fm_check_rx_extra_headroom) { + if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX || + fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) { + pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n", + fsl_fm_rx_extra_headroom, + FSL_FM_RX_EXTRA_HEADROOM_MIN, + FSL_FM_RX_EXTRA_HEADROOM_MAX, + FSL_FM_RX_EXTRA_HEADROOM); + fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM; + } + + fm_check_rx_extra_headroom = true; + fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16); + } + + return fsl_fm_rx_extra_headroom; +} +EXPORT_SYMBOL(fman_get_rx_extra_headroom); + +/** + * fman_bind + * @dev: FMan OF device pointer + * + * Bind to a specific FMan device. + * + * Allowed only after the port was created. 
+ * + * Return: A pointer to the FMan device + */ +struct fman *fman_bind(struct device *fm_dev) +{ + return (struct fman *)(dev_get_drvdata(get_device(fm_dev))); +} + +static irqreturn_t fman_err_irq(int irq, void *handle) +{ + struct fman *fman = (struct fman *)handle; + u32 pending; + struct fman_fpm_regs __iomem *fpm_rg; + irqreturn_t single_ret, ret = IRQ_NONE; + + if (!is_init_done(fman->cfg)) + return IRQ_NONE; + + fpm_rg = fman->fpm_regs; + + /* error interrupts */ + pending = ioread32be(&fpm_rg->fm_epi); + if (!pending) + return IRQ_NONE; + + if (pending & ERR_INTR_EN_BMI) { + single_ret = bmi_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_QMI) { + single_ret = qmi_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_FPM) { + single_ret = fpm_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_DMA) { + single_ret = dma_err_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MURAM) { + single_ret = muram_err_intr(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + /* MAC error interrupts */ + if (pending & ERR_INTR_EN_MAC0) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC1) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC2) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC3) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC4) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC5) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC6) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC7) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC8) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & ERR_INTR_EN_MAC9) { + single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + return ret; +} + +static irqreturn_t fman_irq(int irq, void *handle) +{ + struct fman *fman = (struct fman *)handle; + u32 pending; + struct fman_fpm_regs __iomem *fpm_rg; + irqreturn_t single_ret, ret = IRQ_NONE; + + if (!is_init_done(fman->cfg)) + return IRQ_NONE; + + fpm_rg = fman->fpm_regs; + + /* normal interrupts */ + pending = ioread32be(&fpm_rg->fm_npi); + if (!pending) + return IRQ_NONE; + + if (pending & INTR_EN_QMI) { + single_ret = qmi_event(fman); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + /* MAC interrupts */ + if (pending & INTR_EN_MAC0) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC1) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1); + if (single_ret == 
IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC2) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC3) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC4) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC5) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC6) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC7) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC8) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + if (pending & INTR_EN_MAC9) { + single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9); + if (single_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + return ret; +} + +static const struct of_device_id fman_muram_match[] = { + { + .compatible = "fsl,fman-muram"}, + {} +}; +MODULE_DEVICE_TABLE(of, fman_muram_match); + +static struct fman *read_dts_node(struct platform_device *of_dev) +{ + struct fman *fman; + struct device_node *fm_node, *muram_node; + struct resource *res; + const u32 *u32_prop; + int lenp, err, irq; + struct clk *clk; + u32 clk_rate; + phys_addr_t phys_base_addr; + resource_size_t mem_size; + + fman = kzalloc(sizeof(*fman), GFP_KERNEL); + if (!fman) + return NULL; + + fm_node = of_node_get(of_dev->dev.of_node); + + u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n", + __func__, fm_node->full_name); + goto fman_node_put; + } + if (WARN_ON(lenp != sizeof(u32))) + goto fman_node_put; + + fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]); + + /* Get the FM interrupt */ + res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n", + __func__); + goto fman_node_put; + } + irq = res->start; + + /* Get the FM error interrupt */ + res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n", + __func__); + goto fman_node_put; + } + fman->dts_params.err_irq = res->start; + + /* Get the FM address */ + res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n", + __func__); + goto fman_node_put; + } + + phys_base_addr = res->start; + mem_size = resource_size(res); + + clk = of_clk_get(fm_node, 0); + if (IS_ERR(clk)) { + dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n", + __func__, fman->dts_params.id); + goto fman_node_put; + } + + clk_rate = clk_get_rate(clk); + if (!clk_rate) { + dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n", + __func__, fman->dts_params.id); + goto fman_node_put; + } + /* Rounding to MHz */ + fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000); + + u32_prop = (const u32 *)of_get_property(fm_node, + "fsl,qman-channel-range", + &lenp); + if (!u32_prop) { + dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n", + __func__, fm_node->full_name); + goto 
fman_node_put; + } + if (WARN_ON(lenp != sizeof(u32) * 2)) + goto fman_node_put; + fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]); + fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]); + + /* Get the MURAM base address and size */ + muram_node = of_find_matching_node(fm_node, fman_muram_match); + if (!muram_node) { + dev_err(&of_dev->dev, "%s: could not find MURAM node\n", + __func__); + goto fman_node_put; + } + + err = of_address_to_resource(muram_node, 0, + &fman->dts_params.muram_res); + if (err) { + of_node_put(muram_node); + dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", + __func__, err); + goto fman_node_put; + } + + of_node_put(muram_node); + of_node_put(fm_node); + + err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + if (err < 0) { + dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", + __func__, irq, err); + goto fman_free; + } + + if (fman->dts_params.err_irq != 0) { + err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq, + fman_err_irq, IRQF_SHARED, + "fman-err", fman); + if (err < 0) { + dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", + __func__, fman->dts_params.err_irq, err); + goto fman_free; + } + } + + fman->dts_params.res = + devm_request_mem_region(&of_dev->dev, phys_base_addr, + mem_size, "fman"); + if (!fman->dts_params.res) { + dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", + __func__); + goto fman_free; + } + + fman->dts_params.base_addr = + devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); + if (fman->dts_params.base_addr == 0) { + dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); + goto fman_free; + } + + return fman; + +fman_node_put: + of_node_put(fm_node); +fman_free: + kfree(fman); + return NULL; +} + +static int fman_probe(struct platform_device *of_dev) +{ + struct fman *fman; + struct device *dev; + int err; + + dev = &of_dev->dev; + + fman = read_dts_node(of_dev); + if (!fman) + return -EIO; + + err = fman_config(fman); + if (err) { + dev_err(dev, "%s: FMan config failed\n", __func__); + return -EINVAL; + } + + if (fman_init(fman) != 0) { + dev_err(dev, "%s: FMan init failed\n", __func__); + return -EINVAL; + } + + if (fman->dts_params.err_irq == 0) { + fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false); + fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false); + fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false); + fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false); + fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false); + fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false); + fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false); + fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false); + fman_set_exception(fman, + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false); + fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false); + fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC, + false); + fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false); + fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false); + } + + dev_set_drvdata(dev, fman); + + fman->dev = dev; + + dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); + + return 0; +} + +static const struct of_device_id fman_match[] = { + { + .compatible = "fsl,fman"}, + {} +}; + +MODULE_DEVICE_TABLE(of, fm_match); + +static struct platform_driver fman_driver = { + .driver = { + .name = 
"fsl-fman", + .of_match_table = fman_match, + }, + .probe = fman_probe, +}; + +builtin_platform_driver(fman_driver); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h new file mode 100644 index 000000000000..57aae8d17d77 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -0,0 +1,325 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FM_H +#define __FM_H + +#include <linux/io.h> + +/* FM Frame descriptor macros */ +/* Frame queue Context Override */ +#define FM_FD_CMD_FCO 0x80000000 +#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ + +/* TX-Port: Unsupported Format */ +#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000 +/* TX Port: Length Error */ +#define FM_FD_ERR_LENGTH 0x02000000 +#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */ + +/* IPR frame (not error) */ +#define FM_FD_IPR 0x00000001 +/* IPR non-consistent-sp */ +#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR) +/* IPR error */ +#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR) +/* IPR timeout */ +#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR) +/* TX Port: Length Error */ +#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) + +/* Rx FIFO overflow, FCS error, code error, running disparity error + * (SGMII and TBI modes), FIFO parity error. PHY Sequence error, + * PHY error control character detected. 
+ */ +#define FM_FD_ERR_PHYSICAL 0x00080000 +/* Frame too long OR Frame size exceeds max_length_frame */ +#define FM_FD_ERR_SIZE 0x00040000 +/* classification discard */ +#define FM_FD_ERR_CLS_DISCARD 0x00020000 +/* Extract Out of Frame */ +#define FM_FD_ERR_EXTRACTION 0x00008000 +/* No Scheme Selected */ +#define FM_FD_ERR_NO_SCHEME 0x00004000 +/* Keysize Overflow */ +#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000 +/* Frame color is red */ +#define FM_FD_ERR_COLOR_RED 0x00000800 +/* Frame color is yellow */ +#define FM_FD_ERR_COLOR_YELLOW 0x00000400 +/* Parser Time out Exceed */ +#define FM_FD_ERR_PRS_TIMEOUT 0x00000080 +/* Invalid Soft Parser instruction */ +#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040 +/* Header error was identified during parsing */ +#define FM_FD_ERR_PRS_HDR_ERR 0x00000020 +/* Frame parsed beyind 256 first bytes */ +#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008 + +/* non Frame-Manager error */ +#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000 + +/* FMan driver defines */ +#define FMAN_BMI_FIFO_UNITS 0x100 +#define OFFSET_UNITS 16 + +/* BMan defines */ +#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */ +#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */ + +struct fman; /* FMan data */ + +/* Enum for defining port types */ +enum fman_port_type { + FMAN_PORT_TYPE_TX = 0, /* TX Port */ + FMAN_PORT_TYPE_RX, /* RX Port */ +}; + +struct fman_rev_info { + u8 major; /* Major revision */ + u8 minor; /* Minor revision */ +}; + +enum fman_exceptions { + FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */ + FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */ + FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */ + FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */ + FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */ + FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */ + FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */ + FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */ + FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. 
*/ + FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */ + FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */ + FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */ + FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */ + FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */ + FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */ + FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */ + FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */ +}; + +/* Parse results memory layout */ +struct fman_prs_result { + u8 lpid; /* Logical port id */ + u8 shimr; /* Shim header result */ + u16 l2r; /* Layer 2 result */ + u16 l3r; /* Layer 3 result */ + u8 l4r; /* Layer 4 result */ + u8 cplan; /* Classification plan id */ + u16 nxthdr; /* Next Header */ + u16 cksum; /* Running-sum */ + /* Flags&fragment-offset field of the last IP-header */ + u16 flags_frag_off; + /* Routing type field of a IPV6 routing extension header */ + u8 route_type; + /* Routing Extension Header Present; last bit is IP valid */ + u8 rhp_ip_valid; + u8 shim_off[2]; /* Shim offset */ + u8 ip_pid_off; /* IP PID (last IP-proto) offset */ + u8 eth_off; /* ETH offset */ + u8 llc_snap_off; /* LLC_SNAP offset */ + u8 vlan_off[2]; /* VLAN offset */ + u8 etype_off; /* ETYPE offset */ + u8 pppoe_off; /* PPP offset */ + u8 mpls_off[2]; /* MPLS offset */ + u8 ip_off[2]; /* IP offset */ + u8 gre_off; /* GRE offset */ + u8 l4_off; /* Layer 4 offset */ + u8 nxthdr_off; /* Parser end point */ +}; + +/* A structure for defining buffer prefix area content. */ +struct fman_buffer_prefix_content { + /* Number of bytes to be left at the beginning of the external + * buffer; Note that the private-area will start from the base + * of the buffer address. + */ + u16 priv_data_size; + /* true to pass the parse result to/from the FM; + * User may use FM_PORT_GetBufferPrsResult() in + * order to get the parser-result from a buffer. + */ + bool pass_prs_result; + /* true to pass the timeStamp to/from the FM User */ + bool pass_time_stamp; + /* true to pass the KG hash result to/from the FM User may + * use FM_PORT_GetBufferHashResult() in order to get the + * parser-result from a buffer. + */ + bool pass_hash_result; + /* Add all other Internal-Context information: AD, + * hash-result, key, etc. + */ + u16 data_align; +}; + +/* A structure of information about each of the external + * buffer pools used by a port or storage-profile. + */ +struct fman_ext_pool_params { + u8 id; /* External buffer pool id */ + u16 size; /* External buffer pool buffer size */ +}; + +/* A structure for informing the driver about the external + * buffer pools allocated in the BM and used by a port or a + * storage-profile. + */ +struct fman_ext_pools { + u8 num_of_pools_used; /* Number of pools use by this port */ + struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM]; + /* Parameters for each port */ +}; + +/* A structure for defining BM pool depletion criteria */ +struct fman_buf_pool_depletion { + /* select mode in which pause frames will be sent after a + * number of pools (all together!) are depleted + */ + bool pools_grp_mode_enable; + /* the number of depleted pools that will invoke pause + * frames transmission. + */ + u8 num_of_pools; + /* For each pool, true if it should be considered for + * depletion (Note - this pool must be used by this port!). 
+ */ + bool pools_to_consider[BM_MAX_NUM_OF_POOLS]; + /* select mode in which pause frames will be sent + * after a single-pool is depleted; + */ + bool single_pool_mode_enable; + /* For each pool, true if it should be considered + * for depletion (Note - this pool must be used by this port!) + */ + bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS]; +}; + +/* Enum for inter-module interrupts registration */ +enum fman_event_modules { + FMAN_MOD_MAC = 0, /* MAC event */ + FMAN_MOD_FMAN_CTRL, /* FMAN Controller */ + FMAN_MOD_DUMMY_LAST +}; + +/* Enum for interrupts types */ +enum fman_intr_type { + FMAN_INTR_TYPE_ERR, + FMAN_INTR_TYPE_NORMAL +}; + +/* Enum for inter-module interrupts registration */ +enum fman_inter_module_event { + FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */ + FMAN_EV_ERR_MAC1, /* MAC 1 error event */ + FMAN_EV_ERR_MAC2, /* MAC 2 error event */ + FMAN_EV_ERR_MAC3, /* MAC 3 error event */ + FMAN_EV_ERR_MAC4, /* MAC 4 error event */ + FMAN_EV_ERR_MAC5, /* MAC 5 error event */ + FMAN_EV_ERR_MAC6, /* MAC 6 error event */ + FMAN_EV_ERR_MAC7, /* MAC 7 error event */ + FMAN_EV_ERR_MAC8, /* MAC 8 error event */ + FMAN_EV_ERR_MAC9, /* MAC 9 error event */ + FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */ + FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */ + FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */ + FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */ + FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */ + FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */ + FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */ + FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */ + FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */ + FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */ + FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */ + FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */ + FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */ + FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */ + FMAN_EV_CNT +}; + +struct fman_intr_src { + void (*isr_cb)(void *src_arg); + void *src_handle; +}; + +/* Structure for port-FM communication during fman_port_init. */ +struct fman_port_init_params { + u8 port_id; /* port Id */ + enum fman_port_type port_type; /* Port type */ + u16 port_speed; /* Port speed */ + u16 liodn_offset; /* Port's requested resource */ + u8 num_of_tasks; /* Port's requested resource */ + u8 num_of_extra_tasks; /* Port's requested resource */ + u8 num_of_open_dmas; /* Port's requested resource */ + u8 num_of_extra_open_dmas; /* Port's requested resource */ + u32 size_of_fifo; /* Port's requested resource */ + u32 extra_size_of_fifo; /* Port's requested resource */ + u8 deq_pipeline_depth; /* Port's requested resource */ + u16 max_frame_length; /* Port's max frame length. */ + u16 liodn_base; + /* LIODN base for this port, to be used together with LIODN offset. 
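+	 * Both values are programmed together by set_port_liodn() when the
+	 * port parameters are applied in fman_set_port_params().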
*/ +}; + +void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info); + +void fman_register_intr(struct fman *fman, enum fman_event_modules mod, + u8 mod_id, enum fman_intr_type intr_type, + void (*f_isr)(void *h_src_arg), void *h_src_arg); + +void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod, + u8 mod_id, enum fman_intr_type intr_type); + +int fman_set_port_params(struct fman *fman, + struct fman_port_init_params *port_params); + +int fman_reset_mac(struct fman *fman, u8 mac_id); + +u16 fman_get_clock_freq(struct fman *fman); + +u32 fman_get_bmi_max_fifo_size(struct fman *fman); + +int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl); + +u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id); + +struct resource *fman_get_mem_region(struct fman *fman); + +u16 fman_get_max_frm(void); + +int fman_get_rx_extra_headroom(void); + +struct fman *fman_bind(struct device *dev); + +#endif /* __FM_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c new file mode 100644 index 000000000000..6b1261c0b1c2 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -0,0 +1,1453 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_dtsec.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/bitrev.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/phy.h> +#include <linux/crc32.h> +#include <linux/of_mdio.h> +#include <linux/mii.h> + +/* TBI register addresses */ +#define MII_TBICON 0x11 + +/* TBICON register bit fields */ +#define TBICON_SOFT_RESET 0x8000 /* Soft reset */ +#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */ +#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */ +#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */ +#define TBICON_CLK_SELECT 0x0020 /* Clock select */ +#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */ + +#define TBIANA_SGMII 0x4001 +#define TBIANA_1000X 0x01a0 + +/* Interrupt Mask Register (IMASK) */ +#define DTSEC_IMASK_BREN 0x80000000 +#define DTSEC_IMASK_RXCEN 0x40000000 +#define DTSEC_IMASK_MSROEN 0x04000000 +#define DTSEC_IMASK_GTSCEN 0x02000000 +#define DTSEC_IMASK_BTEN 0x01000000 +#define DTSEC_IMASK_TXCEN 0x00800000 +#define DTSEC_IMASK_TXEEN 0x00400000 +#define DTSEC_IMASK_LCEN 0x00040000 +#define DTSEC_IMASK_CRLEN 0x00020000 +#define DTSEC_IMASK_XFUNEN 0x00010000 +#define DTSEC_IMASK_ABRTEN 0x00008000 +#define DTSEC_IMASK_IFERREN 0x00004000 +#define DTSEC_IMASK_MAGEN 0x00000800 +#define DTSEC_IMASK_MMRDEN 0x00000400 +#define DTSEC_IMASK_MMWREN 0x00000200 +#define DTSEC_IMASK_GRSCEN 0x00000100 +#define DTSEC_IMASK_TDPEEN 0x00000002 +#define DTSEC_IMASK_RDPEEN 0x00000001 + +#define DTSEC_EVENTS_MASK \ + ((u32)(DTSEC_IMASK_BREN | \ + DTSEC_IMASK_RXCEN | \ + DTSEC_IMASK_BTEN | \ + DTSEC_IMASK_TXCEN | \ + DTSEC_IMASK_TXEEN | \ + DTSEC_IMASK_ABRTEN | \ + DTSEC_IMASK_LCEN | \ + DTSEC_IMASK_CRLEN | \ + DTSEC_IMASK_XFUNEN | \ + DTSEC_IMASK_IFERREN | \ + DTSEC_IMASK_MAGEN | \ + DTSEC_IMASK_TDPEEN | \ + DTSEC_IMASK_RDPEEN)) + +/* dtsec timestamp event bits */ +#define TMR_PEMASK_TSREEN 0x00010000 +#define TMR_PEVENT_TSRE 0x00010000 + +/* Group address bit indication */ +#define MAC_GROUP_ADDRESS 0x0000010000000000ULL + +/* Defaults */ +#define DEFAULT_HALFDUP_RETRANSMIT 0xf +#define DEFAULT_HALFDUP_COLL_WINDOW 0x37 +#define DEFAULT_TX_PAUSE_TIME 0xf000 +#define DEFAULT_RX_PREPEND 0 +#define DEFAULT_PREAMBLE_LEN 7 +#define DEFAULT_TX_PAUSE_TIME_EXTD 0 +#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40 +#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60 +#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50 +#define DEFAULT_BACK_TO_BACK_IPG 0x60 +#define DEFAULT_MAXIMUM_FRAME 0x600 + +/* register related defines (bits, field offsets..) 
*/ +#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000 + +#define DTSEC_ECNTRL_GMIIM 0x00000040 +#define DTSEC_ECNTRL_TBIM 0x00000020 +#define DTSEC_ECNTRL_SGMIIM 0x00000002 +#define DTSEC_ECNTRL_RPM 0x00000010 +#define DTSEC_ECNTRL_R100M 0x00000008 +#define DTSEC_ECNTRL_QSGMIIM 0x00000001 + +#define DTSEC_TCTRL_GTS 0x00000020 + +#define RCTRL_PAL_MASK 0x001f0000 +#define RCTRL_PAL_SHIFT 16 +#define RCTRL_GHTX 0x00000400 +#define RCTRL_GRS 0x00000020 +#define RCTRL_MPROM 0x00000008 +#define RCTRL_RSF 0x00000004 +#define RCTRL_UPROM 0x00000001 + +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_TX_EN 0x00000001 +#define MACCFG1_RX_EN 0x00000004 + +#define MACCFG2_NIBBLE_MODE 0x00000100 +#define MACCFG2_BYTE_MODE 0x00000200 +#define MACCFG2_PAD_CRC_EN 0x00000004 +#define MACCFG2_FULL_DUPLEX 0x00000001 +#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000 +#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16 +#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000 +#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00 +#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F + +#define HAFDUP_EXCESS_DEFER 0x00010000 +#define HAFDUP_COLLISION_WINDOW 0x000003ff +#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12 +#define HAFDUP_RETRANSMISSION_MAX 0x0000f000 + +#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */ + +#define PTV_PTE_MASK 0xffff0000 +#define PTV_PT_MASK 0x0000ffff +#define PTV_PTE_SHIFT 16 + +#define MAX_PACKET_ALIGNMENT 31 +#define MAX_INTER_PACKET_GAP 0x7f +#define MAX_RETRANSMISSION 0x0f +#define MAX_COLLISION_WINDOW 0x03ff + +/* Hash table size (32 bits*8 regs) */ +#define DTSEC_HASH_TABLE_SIZE 256 +/* Extended Hash table size (32 bits*16 regs) */ +#define EXTENDED_HASH_TABLE_SIZE 512 + +/* dTSEC Memory Map registers */ +struct dtsec_regs { + /* dTSEC General Control and Status Registers */ + u32 tsec_id; /* 0x000 ETSEC_ID register */ + u32 tsec_id2; /* 0x004 ETSEC_ID2 register */ + u32 ievent; /* 0x008 Interrupt event register */ + u32 imask; /* 0x00C Interrupt mask register */ + u32 reserved0010[1]; + u32 ecntrl; /* 0x014 E control register */ + u32 ptv; /* 0x018 Pause time value register */ + u32 tbipa; /* 0x01C TBI PHY address register */ + u32 tmr_ctrl; /* 0x020 Time-stamp Control register */ + u32 tmr_pevent; /* 0x024 Time-stamp event register */ + u32 tmr_pemask; /* 0x028 Timer event mask register */ + u32 reserved002c[5]; + u32 tctrl; /* 0x040 Transmit control register */ + u32 reserved0044[3]; + u32 rctrl; /* 0x050 Receive control register */ + u32 reserved0054[11]; + u32 igaddr[8]; /* 0x080-0x09C Individual/group address */ + u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */ + u32 reserved00c0[16]; + u32 maccfg1; /* 0x100 MAC configuration #1 */ + u32 maccfg2; /* 0x104 MAC configuration #2 */ + u32 ipgifg; /* 0x108 IPG/IFG */ + u32 hafdup; /* 0x10C Half-duplex */ + u32 maxfrm; /* 0x110 Maximum frame */ + u32 reserved0114[10]; + u32 ifstat; /* 0x13C Interface status */ + u32 macstnaddr1; /* 0x140 Station Address,part 1 */ + u32 macstnaddr2; /* 0x144 Station Address,part 2 */ + struct { + u32 exact_match1; /* octets 1-4 */ + u32 exact_match2; /* octets 5-6 */ + } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */ + u32 reserved01c0[16]; + u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */ + u32 tr127; /* 0x204 Tx and Rx 65 to 
127 byte frame counter */ + u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */ + u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */ + u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */ + u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */ + u32 trmgv; + /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */ + u32 rbyt; /* 0x21C receive byte counter */ + u32 rpkt; /* 0x220 receive packet counter */ + u32 rfcs; /* 0x224 receive FCS error counter */ + u32 rmca; /* 0x228 RMCA Rx multicast packet counter */ + u32 rbca; /* 0x22C Rx broadcast packet counter */ + u32 rxcf; /* 0x230 Rx control frame packet counter */ + u32 rxpf; /* 0x234 Rx pause frame packet counter */ + u32 rxuo; /* 0x238 Rx unknown OP code counter */ + u32 raln; /* 0x23C Rx alignment error counter */ + u32 rflr; /* 0x240 Rx frame length error counter */ + u32 rcde; /* 0x244 Rx code error counter */ + u32 rcse; /* 0x248 Rx carrier sense error counter */ + u32 rund; /* 0x24C Rx undersize packet counter */ + u32 rovr; /* 0x250 Rx oversize packet counter */ + u32 rfrg; /* 0x254 Rx fragments counter */ + u32 rjbr; /* 0x258 Rx jabber counter */ + u32 rdrp; /* 0x25C Rx drop */ + u32 tbyt; /* 0x260 Tx byte counter */ + u32 tpkt; /* 0x264 Tx packet counter */ + u32 tmca; /* 0x268 Tx multicast packet counter */ + u32 tbca; /* 0x26C Tx broadcast packet counter */ + u32 txpf; /* 0x270 Tx pause control frame counter */ + u32 tdfr; /* 0x274 Tx deferral packet counter */ + u32 tedf; /* 0x278 Tx excessive deferral packet counter */ + u32 tscl; /* 0x27C Tx single collision packet counter */ + u32 tmcl; /* 0x280 Tx multiple collision packet counter */ + u32 tlcl; /* 0x284 Tx late collision packet counter */ + u32 txcl; /* 0x288 Tx excessive collision packet counter */ + u32 tncl; /* 0x28C Tx total collision counter */ + u32 reserved0290[1]; + u32 tdrp; /* 0x294 Tx drop frame counter */ + u32 tjbr; /* 0x298 Tx jabber frame counter */ + u32 tfcs; /* 0x29C Tx FCS error counter */ + u32 txcf; /* 0x2A0 Tx control frame counter */ + u32 tovr; /* 0x2A4 Tx oversize frame counter */ + u32 tund; /* 0x2A8 Tx undersize frame counter */ + u32 tfrg; /* 0x2AC Tx fragments frame counter */ + u32 car1; /* 0x2B0 carry register one */ + u32 car2; /* 0x2B4 carry register two */ + u32 cam1; /* 0x2B8 carry register one mask register */ + u32 cam2; /* 0x2BC carry register two mask register */ + u32 reserved02c0[848]; +}; + +/* struct dtsec_cfg - dTSEC configuration + * Transmit half-duplex flow control, under software control for 10/100-Mbps + * half-duplex media. If set, back pressure is applied to media by raising + * carrier. + * halfdup_retransmit: + * Number of retransmission attempts following a collision. + * If this is exceeded dTSEC aborts transmission due to excessive collisions. + * The standard specifies the attempt limit to be 15. + * halfdup_coll_window: + * The number of bytes of the frame during which collisions may occur. + * The default value of 55 corresponds to the frame byte at the end of the + * standard 512-bit slot time window. If collisions are detected after this + * byte, the late collision event is asserted and transmission of current + * frame is aborted. + * tx_pad_crc: + * Pad and append CRC. If set, the MAC pads all transmitted short frames and + * appends a CRC to every frame regardless of padding requirement. + * tx_pause_time: + * Transmit pause time value.
This pause value is used as part of the pause + * frame to be sent when a transmit pause frame is initiated. + * If set to 0 this disables transmission of pause frames. + * preamble_len: + * Length, in bytes, of the preamble field preceding each Ethernet + * start-of-frame delimiter byte. The default value of 0x7 should be used in + * order to guarantee reliable operation with IEEE 802.3 compliant hardware. + * rx_prepend: + * Packet alignment padding length. The specified number of bytes (1-31) + * of zero padding are inserted before the start of each received frame. + * For Ethernet, where optional preamble extraction is enabled, the padding + * appears before the preamble, otherwise the padding precedes the + * layer 2 header. + * + * This structure contains basic dTSEC configuration and must be passed to + * init() function. A default set of configuration values can be + * obtained by calling set_dflts(). + */ +struct dtsec_cfg { + u16 halfdup_retransmit; + u16 halfdup_coll_window; + bool tx_pad_crc; + u16 tx_pause_time; + bool ptp_tsu_en; + bool ptp_exception_en; + u32 preamble_len; + u32 rx_prepend; + u16 tx_pause_time_extd; + u16 maximum_frame; + u32 non_back_to_back_ipg1; + u32 non_back_to_back_ipg2; + u32 min_ifg_enforcement; + u32 back_to_back_ipg; +}; + +struct fman_mac { + /* pointer to dTSEC memory mapped registers */ + struct dtsec_regs __iomem *regs; + /* MAC address of device */ + u64 addr; + /* Ethernet physical interface */ + phy_interface_t phy_if; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* Number of individual addresses in registers for this station */ + u8 num_of_ind_addr_in_regs; + /* pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + bool ptp_tsu_enabled; + bool en_tsu_err_exeption; + struct dtsec_cfg *dtsec_drv_param; + void *fm; + struct fman_rev_info fm_rev_info; + bool basex_if; + struct phy_device *tbiphy; +}; + +static void set_dflts(struct dtsec_cfg *cfg) +{ + cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT; + cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW; + cfg->tx_pad_crc = true; + cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME; + /* PHY address 0 is reserved (DPAA RM) */ + cfg->rx_prepend = DEFAULT_RX_PREPEND; + cfg->ptp_tsu_en = true; + cfg->ptp_exception_en = true; + cfg->preamble_len = DEFAULT_PREAMBLE_LEN; + cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD; + cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1; + cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2; + cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT; + cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG; + cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME; +} + +static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, + phy_interface_t iface, u16 iface_speed, u8 *macaddr, + u32 exception_mask, u8 tbi_addr) +{ + bool is_rgmii, is_sgmii, is_qsgmii; + int i; + u32 tmp; + + /* Soft reset */ + iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1); + iowrite32be(0, &regs->maccfg1); + + /* dtsec_id2 */ + tmp = ioread32be(&regs->tsec_id2); + + /* check RGMII support */ + if (iface == PHY_INTERFACE_MODE_RGMII || + iface == PHY_INTERFACE_MODE_RMII) + if (tmp & DTSEC_ID2_INT_REDUCED_OFF) + return -EINVAL; + + if (iface == PHY_INTERFACE_MODE_SGMII || + iface == PHY_INTERFACE_MODE_MII) + if (tmp
& DTSEC_ID2_INT_REDUCED_OFF) + return -EINVAL; + + is_rgmii = iface == PHY_INTERFACE_MODE_RGMII; + is_sgmii = iface == PHY_INTERFACE_MODE_SGMII; + is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII; + + tmp = 0; + if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII) + tmp |= DTSEC_ECNTRL_GMIIM; + if (is_sgmii) + tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM); + if (is_qsgmii) + tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM | + DTSEC_ECNTRL_QSGMIIM); + if (is_rgmii) + tmp |= DTSEC_ECNTRL_RPM; + if (iface_speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + + iowrite32be(tmp, &regs->ecntrl); + + tmp = 0; + + if (cfg->tx_pause_time) + tmp |= cfg->tx_pause_time; + if (cfg->tx_pause_time_extd) + tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT; + iowrite32be(tmp, &regs->ptv); + + tmp = 0; + tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK; + /* Accept short frames */ + tmp |= RCTRL_RSF; + + iowrite32be(tmp, &regs->rctrl); + + /* Assign a Phy Address to the TBI (TBIPA). + * Done also in cases where TBI is not selected to avoid conflict with + * the external PHY's Physical address + */ + iowrite32be(tbi_addr, &regs->tbipa); + + iowrite32be(0, &regs->tmr_ctrl); + + if (cfg->ptp_tsu_en) { + tmp = 0; + tmp |= TMR_PEVENT_TSRE; + iowrite32be(tmp, &regs->tmr_pevent); + + if (cfg->ptp_exception_en) { + tmp = 0; + tmp |= TMR_PEMASK_TSREEN; + iowrite32be(tmp, &regs->tmr_pemask); + } + } + + tmp = 0; + tmp |= MACCFG1_RX_FLOW; + tmp |= MACCFG1_TX_FLOW; + iowrite32be(tmp, &regs->maccfg1); + + tmp = 0; + + if (iface_speed < SPEED_1000) + tmp |= MACCFG2_NIBBLE_MODE; + else if (iface_speed == SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + + tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) & + MACCFG2_PREAMBLE_LENGTH_MASK; + if (cfg->tx_pad_crc) + tmp |= MACCFG2_PAD_CRC_EN; + /* Full Duplex */ + tmp |= MACCFG2_FULL_DUPLEX; + iowrite32be(tmp, &regs->maccfg2); + + tmp = (((cfg->non_back_to_back_ipg1 << + IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT) + & IPGIFG_NON_BACK_TO_BACK_IPG_1) + | ((cfg->non_back_to_back_ipg2 << + IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT) + & IPGIFG_NON_BACK_TO_BACK_IPG_2) + | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT) + & IPGIFG_MIN_IFG_ENFORCEMENT) + | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG)); + iowrite32be(tmp, &regs->ipgifg); + + tmp = 0; + tmp |= HAFDUP_EXCESS_DEFER; + tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT) + & HAFDUP_RETRANSMISSION_MAX); + tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW); + + iowrite32be(tmp, &regs->hafdup); + + /* Initialize Maximum frame length */ + iowrite32be(cfg->maximum_frame, &regs->maxfrm); + + iowrite32be(0xffffffff, &regs->cam1); + iowrite32be(0xffffffff, &regs->cam2); + + iowrite32be(exception_mask, &regs->imask); + + iowrite32be(0xffffffff, &regs->ievent); + + tmp = (u32)((macaddr[5] << 24) | + (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]); + iowrite32be(tmp, &regs->macstnaddr1); + + tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16)); + iowrite32be(tmp, &regs->macstnaddr2); + + /* HASH */ + for (i = 0; i < NUM_OF_HASH_REGS; i++) { + /* Initialize IADDRx */ + iowrite32be(0, &regs->igaddr[i]); + /* Initialize GADDRx */ + iowrite32be(0, &regs->gaddr[i]); + } + + return 0; +} + +static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr) +{ + u32 tmp; + + tmp = (u32)((adr[5] << 24) | + (adr[4] << 16) | (adr[3] << 8) | adr[2]); + iowrite32be(tmp, &regs->macstnaddr1); + + tmp = (u32)((adr[1] << 24) | (adr[0] << 16)); + iowrite32be(tmp, &regs->macstnaddr2); +} + +static void set_bucket(struct dtsec_regs __iomem *regs, int bucket, + bool
enable) +{ + int reg_idx = (bucket >> 5) & 0xf; + int bit_idx = bucket & 0x1f; + u32 bit_mask = 0x80000000 >> bit_idx; + u32 __iomem *reg; + + if (reg_idx > 7) + reg = &regs->gaddr[reg_idx - 8]; + else + reg = &regs->igaddr[reg_idx]; + + if (enable) + iowrite32be(ioread32be(reg) | bit_mask, reg); + else + iowrite32be(ioread32be(reg) & (~bit_mask), reg); +} + +static int check_init_parameters(struct fman_mac *dtsec) +{ + if (dtsec->max_speed >= SPEED_10000) { + pr_err("1G MAC driver supports 1G or lower speeds\n"); + return -EINVAL; + } + if (dtsec->addr == 0) { + pr_err("Ethernet MAC Must have a valid MAC Address\n"); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->rx_prepend > + MAX_PACKET_ALIGNMENT) { + pr_err("packetAlignmentPadding can't be greater than %d\n", + MAX_PACKET_ALIGNMENT); + return -EINVAL; + } + if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 > + MAX_INTER_PACKET_GAP) || + ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 > + MAX_INTER_PACKET_GAP) || + ((dtsec->dtsec_drv_param)->back_to_back_ipg > + MAX_INTER_PACKET_GAP)) { + pr_err("Inter packet gap can't be greater than %d\n", + MAX_INTER_PACKET_GAP); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->halfdup_retransmit > + MAX_RETRANSMISSION) { + pr_err("maxRetransmission can't be greater than %d\n", + MAX_RETRANSMISSION); + return -EINVAL; + } + if ((dtsec->dtsec_drv_param)->halfdup_coll_window > + MAX_COLLISION_WINDOW) { + pr_err("collisionWindow can't be greater than %d\n", + MAX_COLLISION_WINDOW); + return -EINVAL; + /* If Auto negotiation process is disabled, need to set up the PHY + * using the MII Management Interface + */ + } + if (!dtsec->exception_cb) { + pr_err("uninitialized exception_cb\n"); + return -EINVAL; + } + if (!dtsec->event_cb) { + pr_err("uninitialized event_cb\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_1G_BAB_RX: + bit_mask = DTSEC_IMASK_BREN; + break; + case FM_MAC_EX_1G_RX_CTL: + bit_mask = DTSEC_IMASK_RXCEN; + break; + case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET: + bit_mask = DTSEC_IMASK_GTSCEN; + break; + case FM_MAC_EX_1G_BAB_TX: + bit_mask = DTSEC_IMASK_BTEN; + break; + case FM_MAC_EX_1G_TX_CTL: + bit_mask = DTSEC_IMASK_TXCEN; + break; + case FM_MAC_EX_1G_TX_ERR: + bit_mask = DTSEC_IMASK_TXEEN; + break; + case FM_MAC_EX_1G_LATE_COL: + bit_mask = DTSEC_IMASK_LCEN; + break; + case FM_MAC_EX_1G_COL_RET_LMT: + bit_mask = DTSEC_IMASK_CRLEN; + break; + case FM_MAC_EX_1G_TX_FIFO_UNDRN: + bit_mask = DTSEC_IMASK_XFUNEN; + break; + case FM_MAC_EX_1G_MAG_PCKT: + bit_mask = DTSEC_IMASK_MAGEN; + break; + case FM_MAC_EX_1G_MII_MNG_RD_COMPLET: + bit_mask = DTSEC_IMASK_MMRDEN; + break; + case FM_MAC_EX_1G_MII_MNG_WR_COMPLET: + bit_mask = DTSEC_IMASK_MMWREN; + break; + case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET: + bit_mask = DTSEC_IMASK_GRSCEN; + break; + case FM_MAC_EX_1G_DATA_ERR: + bit_mask = DTSEC_IMASK_TDPEEN; + break; + case FM_MAC_EX_1G_RX_MIB_CNT_OVFL: + bit_mask = DTSEC_IMASK_MSROEN; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static bool is_init_done(struct dtsec_cfg *dtsec_drv_params) +{ + /* Checks if dTSEC driver parameters were initialized */ + if (!dtsec_drv_params) + return true; + + return false; +} + +static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (is_init_done(dtsec->dtsec_drv_param)) + return 0; + + return (u16)ioread32be(&regs->maxfrm); +} + +static void
dtsec_isr(void *handle) +{ + struct fman_mac *dtsec = (struct fman_mac *)handle; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 event; + + /* do not handle MDIO events */ + event = ioread32be(&regs->ievent) & + (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN)); + + event &= ioread32be(&regs->imask); + + iowrite32be(event, &regs->ievent); + + if (event & DTSEC_IMASK_BREN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX); + if (event & DTSEC_IMASK_RXCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL); + if (event & DTSEC_IMASK_GTSCEN) + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET); + if (event & DTSEC_IMASK_BTEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX); + if (event & DTSEC_IMASK_TXCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL); + if (event & DTSEC_IMASK_TXEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR); + if (event & DTSEC_IMASK_LCEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL); + if (event & DTSEC_IMASK_CRLEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT); + if (event & DTSEC_IMASK_XFUNEN) { + /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */ + if (dtsec->fm_rev_info.major == 2) { + u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i; + /* a. Write 0x00E0_0C00 to DTSEC_ID + * This is a read only register + * b. Read and save the value of TPKT + */ + tpkt1 = ioread32be(&regs->tpkt); + + /* c. Read the register at dTSEC address offset 0x32C */ + tmp_reg1 = ioread32be(&regs->reserved02c0[27]); + + /* d. Compare bits [9:15] to bits [25:31] of the + * register at address offset 0x32C. + */ + if ((tmp_reg1 & 0x007F0000) != + (tmp_reg1 & 0x0000007F)) { + /* If they are not equal, save the value of + * this register and wait for at least + * MAXFRM*16 ns + */ + usleep_range((u32)(min + (dtsec_get_max_frame_length(dtsec) * + 16 / 1000, 1)), (u32) + (min(dtsec_get_max_frame_length + (dtsec) * 16 / 1000, 1) + 1)); + } + + /* e. Read and save TPKT again and read the register + * at dTSEC address offset 0x32C again + */ + tpkt2 = ioread32be(&regs->tpkt); + tmp_reg2 = ioread32be(&regs->reserved02c0[27]); + + /* f. Compare the value of TPKT saved in step b to + * value read in step e. Also compare bits [9:15] of + * the register at offset 0x32C saved in step d to the + * value of bits [9:15] saved in step e. If the two + * registers values are unchanged, then the transmit + * portion of the dTSEC controller is locked up and + * the user should proceed to the recover sequence. + */ + if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) == + (tmp_reg2 & 0x007F0000))) { + /* recover sequence */ + + /* a.Write a 1 to RCTRL[GRS] */ + + iowrite32be(ioread32be(&regs->rctrl) | + RCTRL_GRS, &regs->rctrl); + + /* b.Wait until IEVENT[GRSC]=1, or at least + * 100 us has elapsed. + */ + for (i = 0; i < 100; i++) { + if (ioread32be(&regs->ievent) & + DTSEC_IMASK_GRSCEN) + break; + udelay(1); + } + if (ioread32be(&regs->ievent) & + DTSEC_IMASK_GRSCEN) + iowrite32be(DTSEC_IMASK_GRSCEN, + &regs->ievent); + else + pr_debug("Rx lockup due to Tx lockup\n"); + + /* c.Write a 1 to bit n of FM_RSTC + * (offset 0x0CC of FPM) + */ + fman_reset_mac(dtsec->fm, dtsec->mac_id); + + /* d.Wait 4 Tx clocks (32 ns) */ + udelay(1); + + /* e.Write a 0 to bit n of FM_RSTC.
*/ + /* cleared by FMAN + */ + } + } + + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN); + } + if (event & DTSEC_IMASK_MAGEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT); + if (event & DTSEC_IMASK_GRSCEN) + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET); + if (event & DTSEC_IMASK_TDPEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR); + if (event & DTSEC_IMASK_RDPEEN) + dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR); + + /* masked interrupts */ + WARN_ON(event & DTSEC_IMASK_ABRTEN); + WARN_ON(event & DTSEC_IMASK_IFERREN); +} + +static void dtsec_1588_isr(void *handle) +{ + struct fman_mac *dtsec = (struct fman_mac *)handle; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 event; + + if (dtsec->ptp_tsu_enabled) { + event = ioread32be(&regs->tmr_pevent); + event &= ioread32be(&regs->tmr_pemask); + + if (event) { + iowrite32be(event, &regs->tmr_pevent); + WARN_ON(event & TMR_PEVENT_TSRE); + dtsec->exception_cb(dtsec->dev_id, + FM_MAC_EX_1G_1588_TS_RX_ERR); + } + } +} + +static void free_init_resources(struct fman_mac *dtsec) +{ + fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_ERR); + fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_NORMAL); + + /* release the driver's group hash table */ + free_hash_table(dtsec->multicast_addr_hash); + dtsec->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(dtsec->unicast_addr_hash); + dtsec->unicast_addr_hash = NULL; +} + +int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val) +{ + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + dtsec->dtsec_drv_param->maximum_frame = new_val; + + return 0; +} + +int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val) +{ + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + dtsec->dtsec_drv_param->tx_pad_crc = new_val; + + return 0; +} + +int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Enable */ + tmp = ioread32be(&regs->maccfg1); + if (mode & COMM_MODE_RX) + tmp |= MACCFG1_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= MACCFG1_TX_EN; + + iowrite32be(tmp, &regs->maccfg1); + + /* Graceful start - clear the graceful receive stop bit */ + if (mode & COMM_MODE_TX) + iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS, + &regs->tctrl); + if (mode & COMM_MODE_RX) + iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, + &regs->rctrl); + + return 0; +} + +int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Graceful stop - Assert the graceful receive stop bit */ + if (mode & COMM_MODE_RX) { + tmp = ioread32be(&regs->rctrl) | RCTRL_GRS; + iowrite32be(tmp, &regs->rctrl); + + if (dtsec->fm_rev_info.major == 2) + usleep_range(100, 200); + else + udelay(10); + } + + if (mode & COMM_MODE_TX) { + if (dtsec->fm_rev_info.major == 2) + pr_debug("GTS not supported due to DTSEC_A004 errata.\n"); + else + pr_debug("GTS not supported due to DTSEC_A0014 errata.\n"); + } + + tmp = ioread32be(&regs->maccfg1); + if (mode & COMM_MODE_RX) + tmp &= ~MACCFG1_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~MACCFG1_TX_EN; + + iowrite32be(tmp, &regs->maccfg1); + + return 0; +} + +int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, + u8 __maybe_unused priority, + u16
pause_time, u16 __maybe_unused thresh_time) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 ptv = 0; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ + if (dtsec->fm_rev_info.major == 2) + if (pause_time <= 320) { + pr_warn("pause-time: %d illegal. Should be > 320\n", + pause_time); + return -EINVAL; + } + + if (pause_time) { + ptv = ioread32be(&regs->ptv); + ptv &= PTV_PTE_MASK; + ptv |= pause_time & PTV_PT_MASK; + iowrite32be(ptv, &regs->ptv); + + /* trigger the transmission of a flow-control pause frame */ + iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW, + &regs->maccfg1); + } else + iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW, + &regs->maccfg1); + + return 0; +} + +int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp = ioread32be(&regs->maccfg1); + if (en) + tmp |= MACCFG1_RX_FLOW; + else + tmp &= ~MACCFG1_RX_FLOW; + iowrite32be(tmp, &regs->maccfg1); + + return 0; +} + +int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) +{ + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Initialize MAC Station Address registers (1 & 2) + * Station address has to be swapped (big endian to little endian) + */ + dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); + set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + + return 0; +} + +int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct eth_hash_entry *hash_entry; + u64 addr; + s32 bucket; + u32 crc = 0xFFFFFFFF; + bool mcast, ghtx; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false); + mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false); + + /* Cannot handle unicast mac addr when GHTX is on */ + if (ghtx && !mcast) { + pr_err("Could not compute hash bucket\n"); + return -EINVAL; + } + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + + /* considering the 9 highest order bits in crc H[8:0]: + *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register + *and H[5:1] (next 5 bits) identify the hash bit + *if ghtx = 1 H[8:5] (highest order 4 bits) identify the hash register + *and H[4:0] (next 5 bits) identify the hash bit. + * + *In bucket index output the low 5 bits identify the hash register + *bit, while the higher 4 bits identify the hash register + */ + + if (ghtx) { + bucket = (s32)((crc >> 23) & 0x1ff); + } else { + bucket = (s32)((crc >> 24) & 0xff); + /* if !ghtx and mcast the bit must be set in gaddr instead of + *igaddr.
+ */ + if (mcast) + bucket += 0x100; + } + + set_bucket(dtsec->regs, bucket, true); + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + if (addr & MAC_GROUP_ADDRESS) + /* Group Address */ + list_add_tail(&hash_entry->node, + &dtsec->multicast_addr_hash->lsts[bucket]); + else + list_add_tail(&hash_entry->node, + &dtsec->unicast_addr_hash->lsts[bucket]); + + return 0; +} + +int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct list_head *pos; + struct eth_hash_entry *hash_entry = NULL; + u64 addr; + s32 bucket; + u32 crc = 0xFFFFFFFF; + bool mcast, ghtx; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false); + mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false); + + /* Cannot handle unicast mac addr when GHTX is on */ + if (ghtx && !mcast) { + pr_err("Could not compute hash bucket\n"); + return -EINVAL; + } + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + + if (ghtx) { + bucket = (s32)((crc >> 23) & 0x1ff); + } else { + bucket = (s32)((crc >> 24) & 0xff); + /* if !ghtx and mcast the bit must be set + * in gaddr instead of igaddr. + */ + if (mcast) + bucket += 0x100; + } + + if (addr & MAC_GROUP_ADDRESS) { + /* Group Address */ + list_for_each(pos, + &dtsec->multicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket])) + set_bucket(dtsec->regs, bucket, false); + } else { + /* Individual Address */ + list_for_each(pos, + &dtsec->unicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket])) + set_bucket(dtsec->regs, bucket, false); + } + + /* address does not exist */ + WARN_ON(!hash_entry); + + return 0; +} + +int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + /* Set unicast promiscuous */ + tmp = ioread32be(&regs->rctrl); + if (new_val) + tmp |= RCTRL_UPROM; + else + tmp &= ~RCTRL_UPROM; + + iowrite32be(tmp, &regs->rctrl); + + /* Set multicast promiscuous */ + tmp = ioread32be(&regs->rctrl); + if (new_val) + tmp |= RCTRL_MPROM; + else + tmp &= ~RCTRL_MPROM; + + iowrite32be(tmp, &regs->rctrl); + + return 0; +} + +int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp = ioread32be(&regs->maccfg2); + + /* Full Duplex */ + tmp |= MACCFG2_FULL_DUPLEX; + + tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE); + if (speed < SPEED_1000) + tmp |= MACCFG2_NIBBLE_MODE; + else if (speed == SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + iowrite32be(tmp, &regs->maccfg2); + + tmp = ioread32be(&regs->ecntrl); + if (speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + else + tmp &= ~DTSEC_ECNTRL_R100M; + iowrite32be(tmp, &regs->ecntrl); + + return 0; +} + +int dtsec_restart_autoneg(struct fman_mac
*dtsec) +{ + u16 tmp_reg16; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR); + + tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000); + tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART | + BMCR_FULLDPLX | BMCR_SPEED1000); + + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + + return 0; +} + +int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + *mac_version = ioread32be(&regs->tsec_id); + + return 0; +} + +int dtsec_set_exception(struct fman_mac *dtsec, + enum fman_mac_exceptions exception, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 bit_mask = 0; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) { + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + dtsec->exceptions |= bit_mask; + else + dtsec->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + if (enable) + iowrite32be(ioread32be(&regs->imask) | bit_mask, + &regs->imask); + else + iowrite32be(ioread32be(&regs->imask) & ~bit_mask, + &regs->imask); + } else { + if (!dtsec->ptp_tsu_enabled) { + pr_err("Exception valid for 1588 only\n"); + return -EINVAL; + } + switch (exception) { + case FM_MAC_EX_1G_1588_TS_RX_ERR: + if (enable) { + dtsec->en_tsu_err_exeption = true; + iowrite32be(ioread32be(&regs->tmr_pemask) | + TMR_PEMASK_TSREEN, + &regs->tmr_pemask); + } else { + dtsec->en_tsu_err_exeption = false; + iowrite32be(ioread32be(&regs->tmr_pemask) & + ~TMR_PEMASK_TSREEN, + &regs->tmr_pemask); + } + break; + default: + pr_err("Undefined exception\n"); + return -EINVAL; + } + } + + return 0; +} + +int dtsec_init(struct fman_mac *dtsec) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + struct dtsec_cfg *dtsec_drv_param; + int err; + u16 max_frm_ln; + enet_addr_t eth_addr; + + if (is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + if (DEFAULT_RESET_ON_INIT && + (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) { + pr_err("Can't reset MAC!\n"); + return -EINVAL; + } + + err = check_init_parameters(dtsec); + if (err) + return err; + + dtsec_drv_param = dtsec->dtsec_drv_param; + + MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr); + + err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if, + dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions, + dtsec->tbiphy->mdio.addr); + if (err) { + free_init_resources(dtsec); + pr_err("DTSEC version doesn't support this i/f mode\n"); + return err; + } + + if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) { + u16 tmp_reg16; + + /* Configure the TBI PHY Control Register */ + tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET; + phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + + tmp_reg16 = TBICON_CLK_SELECT; + phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + + tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE | + BMCR_FULLDPLX | BMCR_SPEED1000); + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + + if (dtsec->basex_if) + tmp_reg16 = TBIANA_1000X; + else + tmp_reg16 = TBIANA_SGMII; + phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16); + + tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART | + BMCR_FULLDPLX | BMCR_SPEED1000); + + phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); + } + + /* Max Frame Length */ + max_frm_ln = (u16)ioread32be(&regs->maxfrm); + err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln); + if (err) { + pr_err("Setting max frame length failed\n"); +
free_init_resources(dtsec); + return -EINVAL; + } + + dtsec->multicast_addr_hash = + alloc_hash_table(EXTENDED_HASH_TABLE_SIZE); + if (!dtsec->multicast_addr_hash) { + free_init_resources(dtsec); + pr_err("MC hash table allocation failed\n"); + return -ENOMEM; + } + + dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE); + if (!dtsec->unicast_addr_hash) { + free_init_resources(dtsec); + pr_err("UC hash table allocation failed\n"); + return -ENOMEM; + } + + /* register err intr handler for dtsec to FPM (err) */ + fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec); + /* register 1588 intr handler for TMR to FPM (normal) */ + fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id, + FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec); + + kfree(dtsec_drv_param); + dtsec->dtsec_drv_param = NULL; + + return 0; +} + +int dtsec_free(struct fman_mac *dtsec) +{ + free_init_resources(dtsec); + + kfree(dtsec->dtsec_drv_param); + dtsec->dtsec_drv_param = NULL; + kfree(dtsec); + + return 0; +} + +struct fman_mac *dtsec_config(struct fman_mac_params *params) +{ + struct fman_mac *dtsec; + struct dtsec_cfg *dtsec_drv_param; + void __iomem *base_addr; + + base_addr = params->base_addr; + + /* allocate memory for the dTSEC data structure. */ + dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL); + if (!dtsec) + return NULL; + + /* allocate memory for the d_tsec driver parameters data structure. */ + dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL); + if (!dtsec_drv_param) + goto err_dtsec; + + /* Plant parameter structure pointer */ + dtsec->dtsec_drv_param = dtsec_drv_param; + + set_dflts(dtsec_drv_param); + + dtsec->regs = base_addr; + dtsec->addr = ENET_ADDR_TO_UINT64(params->addr); + dtsec->max_speed = params->max_speed; + dtsec->phy_if = params->phy_if; + dtsec->mac_id = params->mac_id; + dtsec->exceptions = (DTSEC_IMASK_BREN | + DTSEC_IMASK_RXCEN | + DTSEC_IMASK_BTEN | + DTSEC_IMASK_TXCEN | + DTSEC_IMASK_TXEEN | + DTSEC_IMASK_ABRTEN | + DTSEC_IMASK_LCEN | + DTSEC_IMASK_CRLEN | + DTSEC_IMASK_XFUNEN | + DTSEC_IMASK_IFERREN | + DTSEC_IMASK_MAGEN | + DTSEC_IMASK_TDPEEN | + DTSEC_IMASK_RDPEEN); + dtsec->exception_cb = params->exception_cb; + dtsec->event_cb = params->event_cb; + dtsec->dev_id = params->dev_id; + dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en; + dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en; + + dtsec->fm = params->fm; + dtsec->basex_if = params->basex_if; + + if (!params->internal_phy_node) { + pr_err("TBI PHY node is not available\n"); + goto err_dtsec_drv_param; + } + + dtsec->tbiphy = of_phy_find_device(params->internal_phy_node); + if (!dtsec->tbiphy) { + pr_err("of_phy_find_device (TBI PHY) failed\n"); + goto err_dtsec_drv_param; + } + + put_device(&dtsec->tbiphy->mdio.dev); + + /* Save FMan revision */ + fman_get_revision(dtsec->fm, &dtsec->fm_rev_info); + + return dtsec; + +err_dtsec_drv_param: + kfree(dtsec_drv_param); +err_dtsec: + kfree(dtsec); + return NULL; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h new file mode 100644 index 000000000000..c4467c072058 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -0,0 +1,59 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DTSEC_H +#define __DTSEC_H + +#include "fman_mac.h" + +struct fman_mac *dtsec_config(struct fman_mac_params *params); +int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val); +int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr); +int dtsec_adjust_link(struct fman_mac *dtsec, + u16 speed); +int dtsec_restart_autoneg(struct fman_mac *dtsec); +int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val); +int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val); +int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode); +int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode); +int dtsec_init(struct fman_mac *dtsec); +int dtsec_free(struct fman_mac *dtsec); +int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en); +int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority, + u16 pause_time, u16 thresh_time); +int dtsec_set_exception(struct fman_mac *dtsec, + enum fman_mac_exceptions exception, bool enable); +int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); +int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); +int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); + +#endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h new file mode 100644 index 000000000000..8ddeedbcef9c --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -0,0 +1,278 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* FM MAC ... */ +#ifndef __FM_MAC_H +#define __FM_MAC_H + +#include "fman.h" + +#include <linux/slab.h> +#include <linux/phy.h> +#include <linux/if_ether.h> + +struct fman_mac; + +/* Ethernet Address */ +typedef u8 enet_addr_t[ETH_ALEN]; + +#define ENET_ADDR_TO_UINT64(_enet_addr) \ + (u64)(((u64)(_enet_addr)[0] << 40) | \ + ((u64)(_enet_addr)[1] << 32) | \ + ((u64)(_enet_addr)[2] << 24) | \ + ((u64)(_enet_addr)[3] << 16) | \ + ((u64)(_enet_addr)[4] << 8) | \ + ((u64)(_enet_addr)[5])) + +#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \ + do { \ + int i; \ + for (i = 0; i < ETH_ALEN; i++) \ + (_enet_addr)[i] = \ + (u8)((_addr64) >> ((5 - i) * 8)); \ + } while (0) + +/* defaults */ +#define DEFAULT_RESET_ON_INIT false + +/* PFC defines */ +#define FSL_FM_PAUSE_TIME_ENABLE 0xf000 +#define FSL_FM_PAUSE_TIME_DISABLE 0 +#define FSL_FM_PAUSE_THRESH_DEFAULT 0 + +#define FM_MAC_NO_PFC 0xff + +/* HASH defines */ +#define ETH_HASH_ENTRY_OBJ(ptr) \ + hlist_entry_safe(ptr, struct eth_hash_entry, node) + +/* Enumeration (bit flags) of communication modes (Transmit, + * receive or both). 
+ */ +enum comm_mode { + COMM_MODE_NONE = 0, /* No transmit/receive communication */ + COMM_MODE_RX = 1, /* Only receive communication */ + COMM_MODE_TX = 2, /* Only transmit communication */ + COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */ +}; + +/* FM MAC Exceptions */ +enum fman_mac_exceptions { + FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0 + /* 10GEC MDIO scan event interrupt */ + , FM_MAC_EX_10G_MDIO_CMD_CMPL + /* 10GEC MDIO command completion interrupt */ + , FM_MAC_EX_10G_REM_FAULT + /* 10GEC, mEMAC Remote fault interrupt */ + , FM_MAC_EX_10G_LOC_FAULT + /* 10GEC, mEMAC Local fault interrupt */ + , FM_MAC_EX_10G_TX_ECC_ER + /* 10GEC, mEMAC Transmit frame ECC error interrupt */ + , FM_MAC_EX_10G_TX_FIFO_UNFL + /* 10GEC, mEMAC Transmit FIFO underflow interrupt */ + , FM_MAC_EX_10G_TX_FIFO_OVFL + /* 10GEC, mEMAC Transmit FIFO overflow interrupt */ + , FM_MAC_EX_10G_TX_ER + /* 10GEC Transmit frame error interrupt */ + , FM_MAC_EX_10G_RX_FIFO_OVFL + /* 10GEC, mEMAC Receive FIFO overflow interrupt */ + , FM_MAC_EX_10G_RX_ECC_ER + /* 10GEC, mEMAC Receive frame ECC error interrupt */ + , FM_MAC_EX_10G_RX_JAB_FRM + /* 10GEC Receive jabber frame interrupt */ + , FM_MAC_EX_10G_RX_OVRSZ_FRM + /* 10GEC Receive oversized frame interrupt */ + , FM_MAC_EX_10G_RX_RUNT_FRM + /* 10GEC Receive runt frame interrupt */ + , FM_MAC_EX_10G_RX_FRAG_FRM + /* 10GEC Receive fragment frame interrupt */ + , FM_MAC_EX_10G_RX_LEN_ER + /* 10GEC Receive payload length error interrupt */ + , FM_MAC_EX_10G_RX_CRC_ER + /* 10GEC Receive CRC error interrupt */ + , FM_MAC_EX_10G_RX_ALIGN_ER + /* 10GEC Receive alignment error interrupt */ + , FM_MAC_EX_1G_BAB_RX + /* dTSEC Babbling receive error */ + , FM_MAC_EX_1G_RX_CTL + /* dTSEC Receive control (pause frame) interrupt */ + , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET + /* dTSEC Graceful transmit stop complete */ + , FM_MAC_EX_1G_BAB_TX + /* dTSEC Babbling transmit error */ + , FM_MAC_EX_1G_TX_CTL + /* dTSEC Transmit control (pause frame) interrupt */ + , FM_MAC_EX_1G_TX_ERR + /* dTSEC Transmit error */ + , FM_MAC_EX_1G_LATE_COL + /* dTSEC Late collision */ + , FM_MAC_EX_1G_COL_RET_LMT + /* dTSEC Collision retry limit */ + , FM_MAC_EX_1G_TX_FIFO_UNDRN + /* dTSEC Transmit FIFO underrun */ + , FM_MAC_EX_1G_MAG_PCKT + /* dTSEC Magic Packet detection */ + , FM_MAC_EX_1G_MII_MNG_RD_COMPLET + /* dTSEC MII management read completion */ + , FM_MAC_EX_1G_MII_MNG_WR_COMPLET + /* dTSEC MII management write completion */ + , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET + /* dTSEC Graceful receive stop complete */ + , FM_MAC_EX_1G_DATA_ERR + /* dTSEC Internal data error on transmit */ + , FM_MAC_1G_RX_DATA_ERR + /* dTSEC Internal data error on receive */ + , FM_MAC_EX_1G_1588_TS_RX_ERR + /* dTSEC Time-Stamp Receive Error */ + , FM_MAC_EX_1G_RX_MIB_CNT_OVFL + /* dTSEC MIB counter overflow */ + , FM_MAC_EX_TS_FIFO_ECC_ERR + /* mEMAC Time-stamp FIFO ECC error interrupt; + * not supported on T4240/B4860 rev1 chips + */ + , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT + /* mEMAC Magic Packet Indication Interrupt */ +}; + +struct eth_hash_entry { + u64 addr; /* Ethernet Address */ + struct list_head node; +}; + +typedef void (fman_mac_exception_cb)(void *dev_id, + enum fman_mac_exceptions exceptions); + +/* FMan MAC config input */ +struct fman_mac_params { + /* Base of memory mapped FM MAC registers */ + void __iomem *base_addr; + /* MAC address of device; First octet is sent first */ + enet_addr_t addr; + /* MAC ID; numbering of dTSEC and 1G-mEMAC: + * 0 - 
FM_MAX_NUM_OF_1G_MACS; + * numbering of 10G-MAC (TGEC) and 10G-mEMAC: + * 0 - FM_MAX_NUM_OF_10G_MACS + */ + u8 mac_id; + /* PHY interface */ + phy_interface_t phy_if; + /* Note that the speed should indicate the maximum rate that + * this MAC should support rather than the actual speed; + */ + u16 max_speed; + /* A handle to the FM object this port related to */ + void *fm; + /* MDIO exceptions interrupt source - not valid for all + * MACs; MUST be set to 'NO_IRQ' for MACs that don't have + * mdio-irq, or for polling + */ + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */ + fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */ + /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC + * and phy or backplane; Note: 1000BaseX auto-negotiation relates only + * to interface between MAC and phy/backplane, SGMII phy can still + * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps + */ + bool basex_if; + /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */ + struct device_node *internal_phy_node; +}; + +struct eth_hash_t { + u16 size; + struct list_head *lsts; +}; + +static inline struct eth_hash_entry +*dequeue_addr_from_hash_entry(struct list_head *addr_lst) +{ + struct eth_hash_entry *hash_entry = NULL; + + if (!list_empty(addr_lst)) { + hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next); + list_del_init(&hash_entry->node); + } + return hash_entry; +} + +static inline void free_hash_table(struct eth_hash_t *hash) +{ + struct eth_hash_entry *hash_entry; + int i = 0; + + if (hash) { + if (hash->lsts) { + for (i = 0; i < hash->size; i++) { + hash_entry = + dequeue_addr_from_hash_entry(&hash->lsts[i]); + while (hash_entry) { + kfree(hash_entry); + hash_entry = + dequeue_addr_from_hash_entry(&hash-> + lsts[i]); + } + } + + kfree(hash->lsts); + } + + kfree(hash); + } +} + +static inline struct eth_hash_t *alloc_hash_table(u16 size) +{ + u32 i; + struct eth_hash_t *hash; + + /* Allocate address hash table */ + hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + if (!hash) + return NULL; + + hash->size = size; + + hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head), + GFP_KERNEL); + if (!hash->lsts) { + kfree(hash); + return NULL; + } + + for (i = 0; i < hash->size; i++) + INIT_LIST_HEAD(&hash->lsts[i]); + + return hash; +} + +#endif /* __FM_MAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c new file mode 100644 index 000000000000..45e98fd8b79e --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -0,0 +1,1170 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_memac.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/phy.h> +#include <linux/of_mdio.h> + +/* PCS registers */ +#define MDIO_SGMII_CR 0x00 +#define MDIO_SGMII_DEV_ABIL_SGMII 0x04 +#define MDIO_SGMII_LINK_TMR_L 0x12 +#define MDIO_SGMII_LINK_TMR_H 0x13 +#define MDIO_SGMII_IF_MODE 0x14 + +/* SGMII Control defines */ +#define SGMII_CR_AN_EN 0x1000 +#define SGMII_CR_RESTART_AN 0x0200 +#define SGMII_CR_FD 0x0100 +#define SGMII_CR_SPEED_SEL1_1G 0x0040 +#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \ + SGMII_CR_SPEED_SEL1_1G) + +/* SGMII Device Ability for SGMII defines */ +#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001 +#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0 + +/* Link timer define */ +#define LINK_TMR_L 0xa120 +#define LINK_TMR_H 0x0007 +#define LINK_TMR_L_BASEX 0xaf08 +#define LINK_TMR_H_BASEX 0x002f + +/* SGMII IF Mode defines */ +#define IF_MODE_USE_SGMII_AN 0x0002 +#define IF_MODE_SGMII_EN 0x0001 +#define IF_MODE_SGMII_SPEED_100M 0x0004 +#define IF_MODE_SGMII_SPEED_1G 0x0008 +#define IF_MODE_SGMII_DUPLEX_HALF 0x0010 + +/* Num of additional exact match MAC adr regs */ +#define MEMAC_NUM_OF_PADDRS 7 + +/* Control and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */ +#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */ +#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */ +#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */ +#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */ +#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */ +#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */ +#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */ +#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */ +#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */ +#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */ +#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */ + +/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */ +#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000 +#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF +#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000 +#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000 +#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019 +#define TX_FIFO_SECTIONS_TX_AVAIL_1G 
0x00000020 +#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060 + +#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \ +do { \ + _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \ + ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \ + (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\ + (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\ +} while (0) + +/* Interface Mode Register (IF_MODE) */ + +#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ +#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ +#define IF_MODE_RGMII 0x00000004 +#define IF_MODE_RGMII_AUTO 0x00008000 +#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */ +#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */ +#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */ +#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */ +#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */ +#define IF_MODE_HD 0x00000040 /* Half duplex operation */ + +/* Hash table Control Register (HASHTABLE_CTRL) */ +#define HASH_CTRL_MCAST_EN 0x00000100 +/* 26-31 Hash table address code */ +#define HASH_CTRL_ADDR_MASK 0x0000003F +/* MAC mcast indication */ +#define GROUP_ADDRESS 0x0000010000000000LL +#define HASH_TABLE_SIZE 64 /* Hash tbl size */ + +/* Interrupt Mask Register (IMASK) */ +#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */ +#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */ +#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */ +#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */ + +#define MEMAC_ALL_ERRS_IMASK \ + ((u32)(MEMAC_IMASK_TSECC_ER | \ + MEMAC_IMASK_TECC_ER | \ + MEMAC_IMASK_RECC_ER | \ + MEMAC_IMASK_MGI)) + +#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). 
Link sync (G) */ +#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */ +#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */ +#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */ +#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/ +#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */ +#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */ +#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */ +#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */ +#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */ +#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */ +#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */ +#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */ +#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */ +#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */ +#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */ +#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */ + +#define DEFAULT_PAUSE_QUANTA 0xf000 +#define DEFAULT_FRAME_LENGTH 0x600 +#define DEFAULT_TX_IPG_LENGTH 12 + +#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF +#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000 +#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF +#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000 + +struct mac_addr { + /* Lower 32 bits of 48-bit MAC address */ + u32 mac_addr_l; + /* Upper 16 bits of 48-bit MAC address */ + u32 mac_addr_u; +}; + +/* memory map */ +struct memac_regs { + u32 res0000[2]; /* General Control and Status */ + u32 command_config; /* 0x008 Ctrl and cfg */ + struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */ + u32 maxfrm; /* 0x014 Max frame length */ + u32 res0018[1]; + u32 rx_fifo_sections; /* Receive FIFO configuration reg */ + u32 tx_fifo_sections; /* Transmit FIFO configuration reg */ + u32 res0024[2]; + u32 hashtable_ctrl; /* 0x02C Hash table control */ + u32 res0030[4]; + u32 ievent; /* 0x040 Interrupt event */ + u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */ + u32 res0048; + u32 imask; /* 0x04C Interrupt mask */ + u32 res0050; + u32 pause_quanta[4]; /* 0x054 Pause quanta */ + u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */ + u32 rx_pause_status; /* 0x074 Receive pause status */ + u32 res0078[2]; + struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */ + u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */ + u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */ + u32 res00c0[8]; + u32 statn_config; /* 0x0E0 Statistics configuration */ + u32 res00e4[7]; + /* Rx Statistics Counter */ + u32 reoct_l; + u32 reoct_u; + u32 roct_l; + u32 roct_u; + u32 raln_l; + u32 raln_u; + u32 rxpf_l; + u32 rxpf_u; + u32 rfrm_l; + u32 rfrm_u; + u32 rfcs_l; + u32 rfcs_u; + u32 rvlan_l; + u32 rvlan_u; + u32 rerr_l; + u32 rerr_u; + u32 ruca_l; + u32 ruca_u; + u32 rmca_l; + u32 rmca_u; + u32 rbca_l; + u32 rbca_u; + u32 rdrp_l; + u32 rdrp_u; + u32 rpkt_l; + u32 rpkt_u; + u32 rund_l; + u32 rund_u; + u32 r64_l; + u32 r64_u; + u32 r127_l; + u32 r127_u; + u32 r255_l; + u32 r255_u; + u32 r511_l; + u32 r511_u; + u32 r1023_l; + u32 r1023_u; + u32 r1518_l; + u32 r1518_u; + u32 r1519x_l; + u32 r1519x_u; + u32 rovr_l; + u32 rovr_u; + u32 rjbr_l; + u32 rjbr_u; + u32 rfrg_l; + u32 rfrg_u; + u32 rcnp_l; + u32 rcnp_u; + u32 rdrntp_l; + u32 rdrntp_u; + u32 res01d0[12]; + /* Tx Statistics Counter */ + u32 teoct_l; + u32 teoct_u; + u32 toct_l; + u32 toct_u; + u32 res0210[2]; + u32 txpf_l; + u32 txpf_u; + u32 
tfrm_l; + u32 tfrm_u; + u32 tfcs_l; + u32 tfcs_u; + u32 tvlan_l; + u32 tvlan_u; + u32 terr_l; + u32 terr_u; + u32 tuca_l; + u32 tuca_u; + u32 tmca_l; + u32 tmca_u; + u32 tbca_l; + u32 tbca_u; + u32 res0258[2]; + u32 tpkt_l; + u32 tpkt_u; + u32 tund_l; + u32 tund_u; + u32 t64_l; + u32 t64_u; + u32 t127_l; + u32 t127_u; + u32 t255_l; + u32 t255_u; + u32 t511_l; + u32 t511_u; + u32 t1023_l; + u32 t1023_u; + u32 t1518_l; + u32 t1518_u; + u32 t1519x_l; + u32 t1519x_u; + u32 res02a8[6]; + u32 tcnp_l; + u32 tcnp_u; + u32 res02c8[14]; + /* Line Interface Control */ + u32 if_mode; /* 0x300 Interface Mode Control */ + u32 if_status; /* 0x304 Interface Status */ + u32 res0308[14]; + /* HiGig/2 */ + u32 hg_config; /* 0x340 Control and cfg */ + u32 res0344[3]; + u32 hg_pause_quanta; /* 0x350 Pause quanta */ + u32 res0354[3]; + u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */ + u32 res0364[3]; + u32 hgrx_pause_status; /* 0x370 Receive pause status */ + u32 hg_fifos_status; /* 0x374 fifos status */ + u32 rhm; /* 0x378 rx messages counter */ + u32 thm; /* 0x37C tx messages counter */ +}; + +struct memac_cfg { + bool reset_on_init; + bool pause_ignore; + bool promiscuous_mode_enable; + struct fixed_phy_status *fixed_link; + u16 max_frame_length; + u16 pause_quanta; + u32 tx_ipg_length; +}; + +struct fman_mac { + /* Pointer to MAC memory mapped registers */ + struct memac_regs __iomem *regs; + /* MAC address of device */ + u64 addr; + /* Ethernet physical interface */ + phy_interface_t phy_if; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* Pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* Pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + struct memac_cfg *memac_drv_param; + void *fm; + struct fman_rev_info fm_rev_info; + bool basex_if; + struct phy_device *pcsphy; +}; + +static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr, + u8 paddr_num) +{ + u32 tmp0, tmp1; + + tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24); + tmp1 = (u32)(adr[4] | adr[5] << 8); + + if (paddr_num == 0) { + iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l); + iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u); + } else { + iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l); + iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u); + } +} + +static int reset(struct memac_regs __iomem *regs) +{ + u32 tmp; + int count; + + tmp = ioread32be(&regs->command_config); + + tmp |= CMD_CFG_SW_RESET; + + iowrite32be(tmp, &regs->command_config); + + count = 100; + do { + udelay(1); + } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) && + --count); + + if (count == 0) + return -EBUSY; + + return 0; +} + +static void set_exception(struct memac_regs __iomem *regs, u32 val, + bool enable) +{ + u32 tmp; + + tmp = ioread32be(&regs->imask); + if (enable) + tmp |= val; + else + tmp &= ~val; + + iowrite32be(tmp, &regs->imask); +} + +static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, + phy_interface_t phy_if, u16 speed, bool slow_10g_if, + u32 exceptions) +{ + u32 tmp; + + /* Config */ + tmp = 0; + if (cfg->promiscuous_mode_enable) + tmp |= CMD_CFG_PROMIS_EN; + if (cfg->pause_ignore) + tmp |= CMD_CFG_PAUSE_IGNORE; + + /* Payload length check disable */ + tmp |= CMD_CFG_NO_LEN_CHK; + /* Enable padding of frames in transmit direction */ + tmp |= CMD_CFG_TX_PAD_EN; + + tmp |=
CMD_CFG_CRC_FWD; + + iowrite32be(tmp, ®s->command_config); + + /* Max Frame Length */ + iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm); + + /* Pause Time */ + iowrite32be((u32)cfg->pause_quanta, ®s->pause_quanta[0]); + iowrite32be((u32)0, ®s->pause_thresh[0]); + + /* IF_MODE */ + tmp = 0; + switch (phy_if) { + case PHY_INTERFACE_MODE_XGMII: + tmp |= IF_MODE_XGMII; + break; + default: + tmp |= IF_MODE_GMII; + if (phy_if == PHY_INTERFACE_MODE_RGMII) + tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO; + } + iowrite32be(tmp, ®s->if_mode); + + /* TX_FIFO_SECTIONS */ + tmp = 0; + if (phy_if == PHY_INTERFACE_MODE_XGMII) { + if (slow_10g_if) { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); + } else { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); + } + } else { + tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G); + } + iowrite32be(tmp, ®s->tx_fifo_sections); + + /* clear all pending events and set-up interrupts */ + iowrite32be(0xffffffff, ®s->ievent); + set_exception(regs, exceptions, true); + + return 0; +} + +static void set_dflts(struct memac_cfg *cfg) +{ + cfg->reset_on_init = false; + cfg->promiscuous_mode_enable = false; + cfg->pause_ignore = false; + cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH; + cfg->max_frame_length = DEFAULT_FRAME_LENGTH; + cfg->pause_quanta = DEFAULT_PAUSE_QUANTA; +} + +static u32 get_mac_addr_hash_code(u64 eth_addr) +{ + u64 mask1, mask2; + u32 xor_val = 0; + u8 i, j; + + for (i = 0; i < 6; i++) { + mask1 = eth_addr & (u64)0x01; + eth_addr >>= 1; + + for (j = 0; j < 7; j++) { + mask2 = eth_addr & (u64)0x01; + mask1 ^= mask2; + eth_addr >>= 1; + } + + xor_val |= (mask1 << (5 - i)); + } + + return xor_val; +} + +static void setup_sgmii_internal_phy(struct fman_mac *memac, + struct fixed_phy_status *fixed_link) +{ + u16 tmp_reg16; + + /* SGMII mode */ + tmp_reg16 = IF_MODE_SGMII_EN; + if (!fixed_link) + /* AN enable */ + tmp_reg16 |= IF_MODE_USE_SGMII_AN; + else { + switch (fixed_link->speed) { + case 10: + /* For 10M: IF_MODE[SPEED_10M] = 0 */ + break; + case 100: + tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; + break; + case 1000: /* fallthrough */ + default: + tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; + break; + } + if (!fixed_link->duplex) + tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF; + } + phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16); + + /* Device ability according to SGMII specification */ + tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE; + phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); + + /* Adjust link timer for SGMII - + * According to Cisco SGMII specification the timer should be 1.6 ms. + * The link_timer register is configured in units of the clock. + * - When running as 1G SGMII, Serdes clock is 125 MHz, so + * unit = 1 / (125*10^6 Hz) = 8 ns. + * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40 + * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so + * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. + * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120. + * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, + * we always set up here a value of 2.5 SGMII. 
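The comment above derives the SGMII link timer count from the Serdes clock period. A standalone check of that arithmetic, assuming the 1.6 ms Cisco SGMII timer and the 8 ns / 3.2 ns units quoted in the comment (the driver's LINK_TMR_H/LINK_TMR_L constants are taken to hold the upper and lower halves of this count), is sketched below.

/* Illustrative check of the link timer arithmetic quoted above:
 * 1.6 ms expressed in Serdes clock units for 1G and 2.5G SGMII.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t timer_ns = 1600000;		/* 1.6 ms in nanoseconds */

	/* 1G SGMII: 125 MHz Serdes clock -> 8 ns per unit */
	uint64_t units_1g  = timer_ns / 8;		/* 200000  = 0x30d40  */
	/* 2.5G SGMII: 312.5 MHz Serdes clock -> 3.2 ns per unit */
	uint64_t units_2g5 = timer_ns * 10 / 32;	/* 500000  = 0x7a120  */

	printf("1G   SGMII link timer: %#llx\n", (unsigned long long)units_1g);
	printf("2.5G SGMII link timer: %#llx\n", (unsigned long long)units_2g5);
	return 0;
}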
+ */ + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H); + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L); + + if (!fixed_link) + /* Restart AN */ + tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; + else + /* AN disabled */ + tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN; + phy_write(memac->pcsphy, 0x0, tmp_reg16); +} + +static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac) +{ + u16 tmp_reg16; + + /* AN Device capability */ + tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE; + phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); + + /* Adjust link timer for SGMII - + * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms. + * The link_timer register is configured in units of the clock. + * - When running as 1G SGMII, Serdes clock is 125 MHz, so + * unit = 1 / (125*10^6 Hz) = 8 ns. + * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0 + * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so + * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. + * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08. + * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, + * we always set up here a value of 2.5 SGMII. + */ + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX); + phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX); + + /* Restart AN */ + tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; + phy_write(memac->pcsphy, 0x0, tmp_reg16); +} + +static int check_init_parameters(struct fman_mac *memac) +{ + if (memac->addr == 0) { + pr_err("Ethernet MAC must have a valid MAC address\n"); + return -EINVAL; + } + if (!memac->exception_cb) { + pr_err("Uninitialized exception handler\n"); + return -EINVAL; + } + if (!memac->event_cb) { + pr_warn("Uninitialize event handler\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_10G_TX_ECC_ER: + bit_mask = MEMAC_IMASK_TECC_ER; + break; + case FM_MAC_EX_10G_RX_ECC_ER: + bit_mask = MEMAC_IMASK_RECC_ER; + break; + case FM_MAC_EX_TS_FIFO_ECC_ERR: + bit_mask = MEMAC_IMASK_TSECC_ER; + break; + case FM_MAC_EX_MAGIC_PACKET_INDICATION: + bit_mask = MEMAC_IMASK_MGI; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static void memac_err_exception(void *handle) +{ + struct fman_mac *memac = (struct fman_mac *)handle; + struct memac_regs __iomem *regs = memac->regs; + u32 event, imask; + + event = ioread32be(®s->ievent); + imask = ioread32be(®s->imask); + + /* Imask include both error and notification/event bits. + * Leaving only error bits enabled by imask. + * The imask error bits are shifted by 16 bits offset from + * their corresponding location in the ievent - hence the >> 16 + */ + event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); + + iowrite32be(event, ®s->ievent); + + if (event & MEMAC_IEVNT_TS_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR); + if (event & MEMAC_IEVNT_TX_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER); + if (event & MEMAC_IEVNT_RX_ECC_ER) + memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER); +} + +static void memac_exception(void *handle) +{ + struct fman_mac *memac = (struct fman_mac *)handle; + struct memac_regs __iomem *regs = memac->regs; + u32 event, imask; + + event = ioread32be(®s->ievent); + imask = ioread32be(®s->imask); + + /* Imask include both error and notification/event bits. + * Leaving only error bits enabled by imask. 
+ * The imask error bits are shifted by 16 bits offset from + * their corresponding location in the ievent - hence the >> 16 + */ + event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16); + + iowrite32be(event, ®s->ievent); + + if (event & MEMAC_IEVNT_MGI) + memac->exception_cb(memac->dev_id, + FM_MAC_EX_MAGIC_PACKET_INDICATION); +} + +static void free_init_resources(struct fman_mac *memac) +{ + fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_ERR); + + fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_NORMAL); + + /* release the driver's group hash table */ + free_hash_table(memac->multicast_addr_hash); + memac->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(memac->unicast_addr_hash); + memac->unicast_addr_hash = NULL; +} + +static bool is_init_done(struct memac_cfg *memac_drv_params) +{ + /* Checks if mEMAC driver parameters were initialized */ + if (!memac_drv_params) + return true; + + return false; +} + +int memac_enable(struct fman_mac *memac, enum comm_mode mode) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp |= CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= CMD_CFG_TX_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_disable(struct fman_mac *memac, enum comm_mode mode) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp &= ~CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~CMD_CFG_TX_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_set_promiscuous(struct fman_mac *memac, bool new_val) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (new_val) + tmp |= CMD_CFG_PROMIS_EN; + else + tmp &= ~CMD_CFG_PROMIS_EN; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_adjust_link(struct fman_mac *memac, u16 speed) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->if_mode); + + /* Set full duplex */ + tmp &= ~IF_MODE_HD; + + if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) { + /* Configure RGMII in manual mode */ + tmp &= ~IF_MODE_RGMII_AUTO; + tmp &= ~IF_MODE_RGMII_SP_MASK; + /* Full duplex */ + tmp |= IF_MODE_RGMII_FD; + + switch (speed) { + case SPEED_1000: + tmp |= IF_MODE_RGMII_1000; + break; + case SPEED_100: + tmp |= IF_MODE_RGMII_100; + break; + case SPEED_10: + tmp |= IF_MODE_RGMII_10; + break; + default: + break; + } + } + + iowrite32be(tmp, ®s->if_mode); + + return 0; +} + +int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->max_frame_length = new_val; + + return 0; +} + +int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->reset_on_init = enable; + + return 0; +} + +int memac_cfg_fixed_link(struct fman_mac *memac, + struct fixed_phy_status *fixed_link) +{ + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + memac->memac_drv_param->fixed_link = fixed_link; + + 
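The exception handlers above rely on the error enables in IMASK sitting 16 bit positions above their counterparts in IEVENT, which is why the pending-event word is masked with (imask & errors) >> 16. A minimal sketch of that filtering step follows; the bit values are deliberately hypothetical stand-ins for the driver's MEMAC_IMASK_*/MEMAC_IEVNT_* constants.

/* Sketch of the ievent/imask filtering done by memac_err_exception()
 * and memac_exception(). Bit positions are hypothetical examples.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_IMASK_TECC_ER	0x02000000	/* hypothetical enable bit */
#define EX_IEVNT_TX_ECC_ER	(EX_IMASK_TECC_ER >> 16)
#define EX_ALL_ERRS_IMASK	EX_IMASK_TECC_ER

int main(void)
{
	uint32_t imask  = EX_IMASK_TECC_ER;		/* Tx ECC error enabled */
	uint32_t ievent = EX_IEVNT_TX_ECC_ER | 0x1;	/* pending + unrelated bit */

	/* keep only events whose error enable is set in imask */
	uint32_t pending = ievent & ((imask & EX_ALL_ERRS_IMASK) >> 16);

	if (pending & EX_IEVNT_TX_ECC_ER)
		printf("Tx ECC error would be reported via exception_cb\n");
	return 0;
}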
return 0; +} + +int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + u16 pause_time, u16 thresh_time) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->tx_fifo_sections); + + GET_TX_EMPTY_DEFAULT_VALUE(tmp); + iowrite32be(tmp, ®s->tx_fifo_sections); + + tmp = ioread32be(®s->command_config); + tmp &= ~CMD_CFG_PFC_MODE; + priority = 0; + + iowrite32be(tmp, ®s->command_config); + + tmp = ioread32be(®s->pause_quanta[priority / 2]); + if (priority % 2) + tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT; + else + tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT; + tmp |= ((u32)pause_time << (16 * (priority % 2))); + iowrite32be(tmp, ®s->pause_quanta[priority / 2]); + + tmp = ioread32be(®s->pause_thresh[priority / 2]); + if (priority % 2) + tmp &= CLXY_PAUSE_THRESH_CLX_QTH; + else + tmp &= CLXY_PAUSE_THRESH_CLY_QTH; + tmp |= ((u32)thresh_time << (16 * (priority % 2))); + iowrite32be(tmp, ®s->pause_thresh[priority / 2]); + + return 0; +} + +int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) +{ + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (en) + tmp &= ~CMD_CFG_PAUSE_IGNORE; + else + tmp |= CMD_CFG_PAUSE_IGNORE; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr) +{ + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0); + + return 0; +} + +int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) +{ + struct memac_regs __iomem *regs = memac->regs; + struct eth_hash_entry *hash_entry; + u32 hash; + u64 addr; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + if (!(addr & GROUP_ADDRESS)) { + /* Unicast addresses not supported in hash */ + pr_err("Unicast Address\n"); + return -EINVAL; + } + hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + list_add_tail(&hash_entry->node, + &memac->multicast_addr_hash->lsts[hash]); + iowrite32be(hash | HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl); + + return 0; +} + +int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) +{ + struct memac_regs __iomem *regs = memac->regs; + struct eth_hash_entry *hash_entry = NULL; + struct list_head *pos; + u32 hash; + u64 addr; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; + + list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&memac->multicast_addr_hash->lsts[hash])) + iowrite32be(hash & ~HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl); + + return 0; +} + +int memac_set_exception(struct fman_mac *memac, + enum fman_mac_exceptions exception, bool enable) +{ + u32 bit_mask = 0; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + memac->exceptions |= 
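memac_set_tx_pause_frames() above packs two 16-bit pause quanta into each 32-bit PAUSE_QUANTA register: priority / 2 picks the register and priority % 2 picks the halfword. The sketch below illustrates only that packing; in the driver itself priority is forced to 0 because PFC mode is disabled.

/* Illustrative packing of pause quanta, two per 32-bit register. */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_pause_quanta(uint32_t reg, unsigned int priority,
				  uint16_t pause_time)
{
	unsigned int shift = 16 * (priority % 2);

	reg &= ~(0xffffu << shift);		/* clear this priority's halfword */
	reg |= (uint32_t)pause_time << shift;	/* insert the new quanta value */
	return reg;
}

int main(void)
{
	uint32_t regs[4] = { 0 };	/* pause_quanta[0..3], as in the register map */
	unsigned int priority = 3;	/* hypothetical priority */

	regs[priority / 2] = pack_pause_quanta(regs[priority / 2], priority, 0xf00f);
	printf("pause_quanta[%u] = 0x%08x\n",
	       priority / 2, (unsigned int)regs[priority / 2]);	/* 0xf00f0000 */
	return 0;
}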
bit_mask; + else + memac->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + set_exception(memac->regs, bit_mask, enable); + + return 0; +} + +int memac_init(struct fman_mac *memac) +{ + struct memac_cfg *memac_drv_param; + u8 i; + enet_addr_t eth_addr; + bool slow_10g_if = false; + struct fixed_phy_status *fixed_link; + int err; + u32 reg32 = 0; + + if (is_init_done(memac->memac_drv_param)) + return -EINVAL; + + err = check_init_parameters(memac); + if (err) + return err; + + memac_drv_param = memac->memac_drv_param; + + if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4) + slow_10g_if = true; + + /* First, reset the MAC if desired. */ + if (memac_drv_param->reset_on_init) { + err = reset(memac->regs); + if (err) { + pr_err("mEMAC reset failed\n"); + return err; + } + } + + /* MAC Address */ + MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr); + add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0); + + fixed_link = memac_drv_param->fixed_link; + + init(memac->regs, memac->memac_drv_param, memac->phy_if, + memac->max_speed, slow_10g_if, memac->exceptions); + + /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround + * Exists only in FMan 6.0 and 6.3. + */ + if ((memac->fm_rev_info.major == 6) && + ((memac->fm_rev_info.minor == 0) || + (memac->fm_rev_info.minor == 3))) { + /* MAC strips CRC from received frames - this workaround + * should decrease the likelihood of bug appearance + */ + reg32 = ioread32be(&memac->regs->command_config); + reg32 &= ~CMD_CFG_CRC_FWD; + iowrite32be(reg32, &memac->regs->command_config); + } + + if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { + /* Configure internal SGMII PHY */ + if (memac->basex_if) + setup_sgmii_internal_phy_base_x(memac); + else + setup_sgmii_internal_phy(memac, fixed_link); + } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { + /* Configure 4 internal SGMII PHYs */ + for (i = 0; i < 4; i++) { + u8 qsmgii_phy_addr, phy_addr; + /* QSGMII PHY address occupies 3 upper bits of 5-bit + * phy_address; the lower 2 bits are used to extend + * register address space and access each one of 4 + * ports inside QSGMII. 
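The QSGMII comment above describes how the PCS MDIO address is formed: the 3-bit QSGMII PHY address occupies the upper bits of the 5-bit address and the low 2 bits select one of the four ports, i.e. (phy_addr << 2) | port. A small illustration with a hypothetical base address:

/* Illustrative QSGMII PCS MDIO addressing, base address is hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t pcs_addr = 0x5;		/* hypothetical 3-bit base MDIO address */

	for (uint8_t port = 0; port < 4; port++) {
		uint8_t qsgmii_addr = (uint8_t)((pcs_addr << 2) | port);

		printf("port %u -> MDIO address 0x%02x\n",
		       (unsigned int)port, qsgmii_addr & 0x1f);
	}
	return 0;
}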
+ */ + phy_addr = memac->pcsphy->mdio.addr; + qsmgii_phy_addr = (u8)((phy_addr << 2) | i); + memac->pcsphy->mdio.addr = qsmgii_phy_addr; + if (memac->basex_if) + setup_sgmii_internal_phy_base_x(memac); + else + setup_sgmii_internal_phy(memac, fixed_link); + + memac->pcsphy->mdio.addr = phy_addr; + } + } + + /* Max Frame Length */ + err = fman_set_mac_max_frame(memac->fm, memac->mac_id, + memac_drv_param->max_frame_length); + if (err) { + pr_err("settings Mac max frame length is FAILED\n"); + return err; + } + + memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE); + if (!memac->multicast_addr_hash) { + free_init_resources(memac); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE); + if (!memac->unicast_addr_hash) { + free_init_resources(memac); + pr_err("allocation hash table is FAILED\n"); + return -ENOMEM; + } + + fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_ERR, memac_err_exception, memac); + + fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, + FMAN_INTR_TYPE_NORMAL, memac_exception, memac); + + kfree(memac_drv_param); + memac->memac_drv_param = NULL; + + return 0; +} + +int memac_free(struct fman_mac *memac) +{ + free_init_resources(memac); + + kfree(memac->memac_drv_param); + kfree(memac); + + return 0; +} + +struct fman_mac *memac_config(struct fman_mac_params *params) +{ + struct fman_mac *memac; + struct memac_cfg *memac_drv_param; + void __iomem *base_addr; + + base_addr = params->base_addr; + /* allocate memory for the m_emac data structure */ + memac = kzalloc(sizeof(*memac), GFP_KERNEL); + if (!memac) + return NULL; + + /* allocate memory for the m_emac driver parameters data structure */ + memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL); + if (!memac_drv_param) { + memac_free(memac); + return NULL; + } + + /* Plant parameter structure pointer */ + memac->memac_drv_param = memac_drv_param; + + set_dflts(memac_drv_param); + + memac->addr = ENET_ADDR_TO_UINT64(params->addr); + + memac->regs = base_addr; + memac->max_speed = params->max_speed; + memac->phy_if = params->phy_if; + memac->mac_id = params->mac_id; + memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | + MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI); + memac->exception_cb = params->exception_cb; + memac->event_cb = params->event_cb; + memac->dev_id = params->dev_id; + memac->fm = params->fm; + memac->basex_if = params->basex_if; + + /* Save FMan revision */ + fman_get_revision(memac->fm, &memac->fm_rev_info); + + if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { + if (!params->internal_phy_node) { + pr_err("PCS PHY node is not available\n"); + memac_free(memac); + return NULL; + } + + memac->pcsphy = of_phy_find_device(params->internal_phy_node); + if (!memac->pcsphy) { + pr_err("of_phy_find_device (PCS PHY) failed\n"); + memac_free(memac); + return NULL; + } + } + + return memac; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h new file mode 100644 index 000000000000..173d8e0fd716 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -0,0 +1,60 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEMAC_H +#define __MEMAC_H + +#include "fman_mac.h" + +#include <linux/netdevice.h> + +struct fman_mac *memac_config(struct fman_mac_params *params); +int memac_set_promiscuous(struct fman_mac *memac, bool new_val); +int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr); +int memac_adjust_link(struct fman_mac *memac, u16 speed); +int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val); +int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable); +int memac_cfg_fixed_link(struct fman_mac *memac, + struct fixed_phy_status *fixed_link); +int memac_enable(struct fman_mac *memac, enum comm_mode mode); +int memac_disable(struct fman_mac *memac, enum comm_mode mode); +int memac_init(struct fman_mac *memac); +int memac_free(struct fman_mac *memac); +int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en); +int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + u16 pause_time, u16 thresh_time); +int memac_set_exception(struct fman_mac *memac, + enum fman_mac_exceptions exception, bool enable); +int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); +int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); + +#endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c new file mode 100644 index 000000000000..4eb0e9ac7182 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c @@ -0,0 +1,158 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fman_muram.h" + +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/genalloc.h> + +struct muram_info { + struct gen_pool *pool; + void __iomem *vbase; + size_t size; + phys_addr_t pbase; +}; + +static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram, + unsigned long vaddr) +{ + return vaddr - (unsigned long)muram->vbase; +} + +/** + * fman_muram_init + * @base: Pointer to base of memory mapped FM-MURAM. + * @size: Size of the FM-MURAM partition. + * + * Creates partition in the MURAM. + * The routine returns a pointer to the MURAM partition. + * This pointer must be passed as to all other FM-MURAM function calls. + * No actual initialization or configuration of FM_MURAM hardware is done by + * this routine. + * + * Return: pointer to FM-MURAM object, or NULL for Failure. + */ +struct muram_info *fman_muram_init(phys_addr_t base, size_t size) +{ + struct muram_info *muram; + void __iomem *vaddr; + int ret; + + muram = kzalloc(sizeof(*muram), GFP_KERNEL); + if (!muram) + return NULL; + + muram->pool = gen_pool_create(ilog2(64), -1); + if (!muram->pool) { + pr_err("%s(): MURAM pool create failed\n", __func__); + goto muram_free; + } + + vaddr = ioremap(base, size); + if (!vaddr) { + pr_err("%s(): MURAM ioremap failed\n", __func__); + goto pool_destroy; + } + + ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr, + base, size, -1); + if (ret < 0) { + pr_err("%s(): MURAM pool add failed\n", __func__); + iounmap(vaddr); + goto pool_destroy; + } + + memset_io(vaddr, 0, (int)size); + + muram->vbase = vaddr; + muram->pbase = base; + return muram; + +pool_destroy: + gen_pool_destroy(muram->pool); +muram_free: + kfree(muram); + return NULL; +} + +/** + * fman_muram_offset_to_vbase + * @muram: FM-MURAM module pointer. 
+ * @offset: the offset of the memory block + * + * Gives the address of the memory region from specific offset + * + * Return: The address of the memory block + */ +unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, + unsigned long offset) +{ + return offset + (unsigned long)muram->vbase; +} + +/** + * fman_muram_alloc + * @muram: FM-MURAM module pointer. + * @size: Size of the memory to be allocated. + * + * Allocate some memory from FM-MURAM partition. + * + * Return: address of the allocated memory; NULL otherwise. + */ +int fman_muram_alloc(struct muram_info *muram, size_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(muram->pool, size); + if (!vaddr) + return -ENOMEM; + + memset_io((void __iomem *)vaddr, 0, size); + + return fman_muram_vbase_to_offset(muram, vaddr); +} + +/** + * fman_muram_free_mem + * muram: FM-MURAM module pointer. + * offset: offset of the memory region to be freed. + * size: size of the memory to be freed. + * + * Free an allocated memory from FM-MURAM partition. + */ +void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size) +{ + unsigned long addr = fman_muram_offset_to_vbase(muram, offset); + + gen_pool_free(muram->pool, addr, size); +} diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h new file mode 100644 index 000000000000..dbf0af9e5bb5 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_muram.h @@ -0,0 +1,51 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
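A minimal usage sketch of the MURAM allocator above, with a hypothetical base address and size for the memory-mapped FM-MURAM window. Note that fman_muram_alloc() returns an offset into MURAM (or a negative errno), not a pointer; fman_muram_offset_to_vbase() converts that offset back to a CPU-accessible address.

/* Illustrative MURAM usage; muram_base/muram_size are assumed inputs. */
#include "fman_muram.h"

#include <linux/errno.h>
#include <linux/io.h>

static int muram_usage_example(phys_addr_t muram_base, size_t muram_size)
{
	struct muram_info *muram;
	void __iomem *vaddr;
	int offset;

	muram = fman_muram_init(muram_base, muram_size);
	if (!muram)
		return -ENOMEM;

	offset = fman_muram_alloc(muram, 256);	/* block from the 64-byte aligned pool */
	if (offset < 0)
		return offset;

	/* hardware is programmed with the MURAM offset; the CPU uses the
	 * virtual mapping, e.g. to clear the block:
	 */
	vaddr = (void __iomem *)fman_muram_offset_to_vbase(muram, offset);
	memset_io(vaddr, 0, 256);

	fman_muram_free_mem(muram, offset, 256);
	return 0;
}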
+ */ +#ifndef __FM_MURAM_EXT +#define __FM_MURAM_EXT + +#include <linux/types.h> + +#define FM_MURAM_INVALID_ALLOCATION -1 + +/* Structure for FM MURAM information */ +struct muram_info; + +struct muram_info *fman_muram_init(phys_addr_t base, size_t size); + +unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, + unsigned long offset); + +int fman_muram_alloc(struct muram_info *muram, size_t size); + +void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size); + +#endif /* __FM_MURAM_EXT */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c new file mode 100644 index 000000000000..70c198d072dc --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -0,0 +1,1778 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_port.h" +#include "fman.h" +#include "fman_sp.h" + +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/delay.h> +#include <linux/libfdt_env.h> + +/* Queue ID */ +#define DFLT_FQ_ID 0x00FFFFFF + +/* General defines */ +#define PORT_BMI_FIFO_UNITS 0x100 + +#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \ + min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS) + +#define PORT_CG_MAP_NUM 8 +#define PORT_PRS_RESULT_WORDS_NUM 8 +#define PORT_IC_OFFSET_UNITS 0x10 + +#define MIN_EXT_BUF_SIZE 64 + +#define BMI_PORT_REGS_OFFSET 0 +#define QMI_PORT_REGS_OFFSET 0x400 + +/* Default values */ +#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \ + DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN + +#define DFLT_PORT_CUT_BYTES_FROM_END 4 + +#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD +#define DFLT_PORT_MAX_FRAME_LENGTH 9600 + +#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \ + MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) + +#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \ + (major == 6 ? \ + MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \ + (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4)) \ + +#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0 + +/* QMI defines */ +#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f + +#define QMI_PORT_CFG_EN 0x80000000 +#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000 + +#define QMI_DEQ_CFG_PRI 0x80000000 +#define QMI_DEQ_CFG_TYPE1 0x10000000 +#define QMI_DEQ_CFG_TYPE2 0x20000000 +#define QMI_DEQ_CFG_TYPE3 0x30000000 +#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000 +#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000 +#define QMI_DEQ_CFG_SP_MASK 0xf +#define QMI_DEQ_CFG_SP_SHIFT 20 + +#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \ + (_type == FMAN_PORT_TYPE_TX ? 
0x1400 : 0x400) + +/* BMI defins */ +#define BMI_EBD_EN 0x80000000 + +#define BMI_PORT_CFG_EN 0x80000000 + +#define BMI_PORT_STATUS_BSY 0x80000000 + +#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT +#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE + +#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16 +#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000 + +#define BMI_FRAME_END_CS_IGNORE_SHIFT 24 +#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f + +#define BMI_RX_FRAME_END_CUT_SHIFT 16 +#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f + +#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT +#define BMI_IC_TO_EXT_MASK 0x0000001f +#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT +#define BMI_IC_FROM_INT_MASK 0x0000000f +#define BMI_IC_SIZE_MASK 0x0000001f + +#define BMI_INT_BUF_MARG_SHIFT 28 +#define BMI_INT_BUF_MARG_MASK 0x0000000f +#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT +#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff +#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff + +#define BMI_CMD_MR_LEAC 0x00200000 +#define BMI_CMD_MR_SLEAC 0x00100000 +#define BMI_CMD_MR_MA 0x00080000 +#define BMI_CMD_MR_DEAS 0x00040000 +#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \ + BMI_CMD_MR_SLEAC | \ + BMI_CMD_MR_MA | \ + BMI_CMD_MR_DEAS) +#define BMI_CMD_TX_MR_DEF 0 + +#define BMI_CMD_ATTR_ORDER 0x80000000 +#define BMI_CMD_ATTR_SYNC 0x02000000 +#define BMI_CMD_ATTR_COLOR_SHIFT 26 + +#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12 +#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f +#define BMI_NEXT_ENG_FD_BITS_SHIFT 24 + +#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID +#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER +#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP +#define BMI_EXT_BUF_POOL_ID_SHIFT 16 +#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000 +#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16 + +#define BMI_TX_FIFO_MIN_FILL_SHIFT 16 + +#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS) +#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS) + +#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \ + ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1) + +#define RX_ERRS_TO_ENQ \ + (FM_PORT_FRM_ERR_DMA | \ + FM_PORT_FRM_ERR_PHYSICAL | \ + FM_PORT_FRM_ERR_SIZE | \ + FM_PORT_FRM_ERR_EXTRACTION | \ + FM_PORT_FRM_ERR_NO_SCHEME | \ + FM_PORT_FRM_ERR_PRS_TIMEOUT | \ + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \ + FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \ + FM_PORT_FRM_ERR_PRS_HDR_ERR | \ + FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \ + FM_PORT_FRM_ERR_IPRE) + +/* NIA defines */ +#define NIA_ORDER_RESTOR 0x00800000 +#define NIA_ENG_BMI 0x00500000 +#define NIA_ENG_QMI_ENQ 0x00540000 +#define NIA_ENG_QMI_DEQ 0x00580000 + +#define NIA_BMI_AC_ENQ_FRAME 0x00000002 +#define NIA_BMI_AC_TX_RELEASE 0x000002C0 +#define NIA_BMI_AC_RELEASE 0x000000C0 +#define NIA_BMI_AC_TX 0x00000274 +#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c + +/* Port IDs */ +#define TX_10G_PORT_BASE 0x30 +#define RX_10G_PORT_BASE 0x10 + +/* BMI Rx port register map */ +struct fman_port_rx_bmi_regs { + u32 fmbm_rcfg; /* Rx Configuration */ + u32 fmbm_rst; /* Rx Status */ + u32 fmbm_rda; /* Rx DMA attributes */ + u32 fmbm_rfp; /* Rx FIFO Parameters */ + u32 fmbm_rfed; /* Rx Frame End Data */ + u32 fmbm_ricp; /* Rx Internal Context Parameters */ + u32 fmbm_rim; /* Rx Internal Buffer Margins */ + u32 fmbm_rebm; /* Rx External Buffer Margins */ + u32 fmbm_rfne; /* Rx Frame Next Engine */ + u32 fmbm_rfca; /* Rx Frame Command Attributes. 
*/ + u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */ + u32 fmbm_rpso; /* Rx Parse Start Offset */ + u32 fmbm_rpp; /* Rx Policer Profile */ + u32 fmbm_rccb; /* Rx Coarse Classification Base */ + u32 fmbm_reth; /* Rx Excessive Threshold */ + u32 reserved003c[1]; /* (0x03C 0x03F) */ + u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM]; + /* Rx Parse Results Array Init */ + u32 fmbm_rfqid; /* Rx Frame Queue ID */ + u32 fmbm_refqid; /* Rx Error Frame Queue ID */ + u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */ + u32 fmbm_rfsem; /* Rx Frame Status Error Mask */ + u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */ + u32 reserved0074[0x2]; /* (0x074-0x07C) */ + u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */ + u32 reserved0080[0x20]; /* (0x080 0x0FF) */ + u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM]; + /* Buffer Manager pool Information- */ + u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter- */ + u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved - */ + u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */ + u32 fmbm_mpd; /* BM Pool Depletion */ + u32 reserved0184[0x1F]; /* (0x184 0x1FF) */ + u32 fmbm_rstc; /* Rx Statistics Counters */ + u32 fmbm_rfrc; /* Rx Frame Counter */ + u32 fmbm_rfbc; /* Rx Bad Frames Counter */ + u32 fmbm_rlfc; /* Rx Large Frames Counter */ + u32 fmbm_rffc; /* Rx Filter Frames Counter */ + u32 fmbm_rfdc; /* Rx Frame Discard Counter */ + u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */ + u32 fmbm_rodc; /* Rx Out of Buffers Discard nntr */ + u32 fmbm_rbdc; /* Rx Buffers Deallocate Counter */ + u32 fmbm_rpec; /* RX Prepare to enqueue Counte */ + u32 reserved0224[0x16]; /* (0x224 0x27F) */ + u32 fmbm_rpc; /* Rx Performance Counters */ + u32 fmbm_rpcp; /* Rx Performance Count Parameters */ + u32 fmbm_rccn; /* Rx Cycle Counter */ + u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */ + u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */ + u32 fmbm_rduc; /* Rx DMA Utilization Counter */ + u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */ + u32 fmbm_rpac; /* Rx Pause Activation Counter */ + u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */ + u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */ + u32 fmbm_rgpr; /* Rx General Purpose Register */ + u32 reserved0310[0x3a]; +}; + +/* BMI Tx port register map */ +struct fman_port_tx_bmi_regs { + u32 fmbm_tcfg; /* Tx Configuration */ + u32 fmbm_tst; /* Tx Status */ + u32 fmbm_tda; /* Tx DMA attributes */ + u32 fmbm_tfp; /* Tx FIFO Parameters */ + u32 fmbm_tfed; /* Tx Frame End Data */ + u32 fmbm_ticp; /* Tx Internal Context Parameters */ + u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */ + u32 fmbm_tfca; /* Tx Frame Command attribute. */ + u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. 
*/ + u32 fmbm_tefqid; /* Tx Frame Error Queue ID */ + u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */ + u32 fmbm_trlmts; /* Tx Rate Limiter Scale */ + u32 fmbm_trlmt; /* Tx Rate Limiter */ + u32 reserved0034[0x0e]; /* (0x034-0x6c) */ + u32 fmbm_tccb; /* Tx Coarse Classification base */ + u32 fmbm_tfne; /* Tx Frame Next Engine */ + u32 fmbm_tpfcm[0x02]; + /* Tx Priority based Flow Control (PFC) Mapping */ + u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */ + u32 reserved0080[0x60]; /* (0x080-0x200) */ + u32 fmbm_tstc; /* Tx Statistics Counters */ + u32 fmbm_tfrc; /* Tx Frame Counter */ + u32 fmbm_tfdc; /* Tx Frames Discard Counter */ + u32 fmbm_tfledc; /* Tx Frame len error discard cntr */ + u32 fmbm_tfufdc; /* Tx Frame unsprt frmt discard cntr */ + u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */ + u32 reserved0218[0x1A]; /* (0x218-0x280) */ + u32 fmbm_tpc; /* Tx Performance Counters */ + u32 fmbm_tpcp; /* Tx Performance Count Parameters */ + u32 fmbm_tccn; /* Tx Cycle Counter */ + u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */ + u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */ + u32 fmbm_tduc; /* Tx DMA Utilization Counter */ + u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */ + u32 reserved029c[16]; /* (0x29C-0x2FF) */ + u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */ + u32 fmbm_tgpr; /* Tx General Purpose Register */ + u32 reserved0310[0x3a]; /* (0x310-0x3FF) */ +}; + +/* BMI port register map */ +union fman_port_bmi_regs { + struct fman_port_rx_bmi_regs rx; + struct fman_port_tx_bmi_regs tx; +}; + +/* QMI port register map */ +struct fman_port_qmi_regs { + u32 fmqm_pnc; /* PortID n Configuration Register */ + u32 fmqm_pns; /* PortID n Status Register */ + u32 fmqm_pnts; /* PortID n Task Status Register */ + u32 reserved00c[4]; /* 0xn00C - 0xn01B */ + u32 fmqm_pnen; /* PortID n Enqueue NIA Register */ + u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */ + u32 reserved024[2]; /* 0xn024 - 0x02B */ + u32 fmqm_pndn; /* PortID n Dequeue NIA Register */ + u32 fmqm_pndc; /* PortID n Dequeue Config Register */ + u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */ + u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */ + u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */ +}; + +/* QMI dequeue prefetch modes */ +enum fman_port_deq_prefetch { + FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */ + FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */ + FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */ +}; + +/* A structure for defining FM port resources */ +struct fman_port_rsrc { + u32 num; /* Committed required resource */ + u32 extra; /* Extra (not committed) required resource */ +}; + +enum fman_port_dma_swap { + FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */ + FMAN_PORT_DMA_SWAP_LE, + /* The transferred data should be swapped in PPC Little Endian mode */ + FMAN_PORT_DMA_SWAP_BE + /* The transferred data should be swapped in Big Endian mode */ +}; + +/* Default port color */ +enum fman_port_color { + FMAN_PORT_COLOR_GREEN, /* Default port color is green */ + FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */ + FMAN_PORT_COLOR_RED, /* Default port color is red */ + FMAN_PORT_COLOR_OVERRIDE /* Ignore color */ +}; + +/* QMI dequeue from the SP channel - types */ +enum fman_port_deq_type { + FMAN_PORT_DEQ_BY_PRI, + /* Priority precedence and Intra-Class scheduling */ + FMAN_PORT_DEQ_ACTIVE_FQ, + /* Active FQ precedence and Intra-Class scheduling */ + FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS + /* Active FQ precedence and override Intra-Class 
scheduling */ +}; + +/* External buffer pools configuration */ +struct fman_port_bpools { + u8 count; /* Num of pools to set up */ + bool counters_enable; /* Enable allocate counters */ + u8 grp_bp_depleted_num; + /* Number of depleted pools - if reached the BMI indicates + * the MAC to send a pause frame + */ + struct { + u8 bpid; /* BM pool ID */ + u16 size; + /* Pool's size - must be in ascending order */ + bool is_backup; + /* If this is a backup pool */ + bool grp_bp_depleted; + /* Consider this buffer in multiple pools depletion criteria */ + bool single_bp_depleted; + /* Consider this buffer in single pool depletion criteria */ + } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM]; +}; + +struct fman_port_cfg { + u32 dflt_fqid; + u32 err_fqid; + u8 deq_sp; + bool deq_high_priority; + enum fman_port_deq_type deq_type; + enum fman_port_deq_prefetch deq_prefetch_option; + u16 deq_byte_cnt; + u8 cheksum_last_bytes_ignore; + u8 rx_cut_end_bytes; + struct fman_buf_pool_depletion buf_pool_depletion; + struct fman_ext_pools ext_buf_pools; + u32 tx_fifo_min_level; + u32 tx_fifo_low_comf_level; + u32 rx_pri_elevation; + u32 rx_fifo_thr; + struct fman_sp_buf_margins buf_margins; + u32 int_buf_start_margin; + struct fman_sp_int_context_data_copy int_context; + u32 discard_mask; + u32 err_mask; + struct fman_buffer_prefix_content buffer_prefix_content; + bool dont_release_buf; + + u8 rx_fd_bits; + u32 tx_fifo_deq_pipeline_depth; + bool errata_A006320; + bool excessive_threshold_register; + bool fmbm_tfne_has_features; + + enum fman_port_dma_swap dma_swap_data; + enum fman_port_color color; +}; + +struct fman_port_rx_pools_params { + u8 num_of_pools; + u16 second_largest_buf_size; + u16 largest_buf_size; +}; + +struct fman_port_dts_params { + void __iomem *base_addr; /* FMan port virtual memory */ + enum fman_port_type type; /* Port type */ + u16 speed; /* Port speed */ + u8 id; /* HW Port Id */ + u32 qman_channel_id; /* QMan channel id (non RX only) */ + struct fman *fman; /* FMan Handle */ +}; + +struct fman_port { + void *fm; + struct device *dev; + struct fman_rev_info rev_info; + u8 port_id; + enum fman_port_type port_type; + u16 port_speed; + + union fman_port_bmi_regs __iomem *bmi_regs; + struct fman_port_qmi_regs __iomem *qmi_regs; + + struct fman_sp_buffer_offsets buffer_offsets; + + u8 internal_buf_offset; + struct fman_ext_pools ext_buf_pools; + + u16 max_frame_length; + struct fman_port_rsrc open_dmas; + struct fman_port_rsrc tasks; + struct fman_port_rsrc fifo_bufs; + struct fman_port_rx_pools_params rx_pools_params; + + struct fman_port_cfg *cfg; + struct fman_port_dts_params dts_params; + + u8 ext_pools_num; + u32 max_port_fifo_size; + u32 max_num_of_ext_pools; + u32 max_num_of_sub_portals; + u32 bm_max_num_of_pools; +}; + +static int init_bmi_rx(struct fman_port *port) +{ + struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* DMA attributes */ + tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT; + /* Enable write optimization */ + tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE; + iowrite32be(tmp, ®s->fmbm_rda); + + /* Rx FIFO parameters */ + tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) << + BMI_RX_FIFO_PRI_ELEVATION_SHIFT; + tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1; + iowrite32be(tmp, ®s->fmbm_rfp); + + if (cfg->excessive_threshold_register) + /* always allow access to the extra resources */ + iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, ®s->fmbm_reth); + + /* Frame end data */ + tmp = 
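init_bmi_rx() above encodes the Rx FIFO parameters into fmbm_rfp as counts of 256-byte BMI FIFO units minus one, with the priority-elevation level in the upper halfword and the threshold in the lower one. The sketch below reproduces that encoding with hypothetical byte counts.

/* Illustrative fmbm_rfp encoding; the input sizes are hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define EX_BMI_FIFO_UNITS		0x100
#define EX_RX_FIFO_PRI_ELEVATION_SHIFT	16

int main(void)
{
	uint32_t rx_pri_elevation = 0x2000;	/* hypothetical: 8 KiB */
	uint32_t rx_fifo_thr      = 0x1800;	/* hypothetical: 6 KiB */

	uint32_t fmbm_rfp =
		((rx_pri_elevation / EX_BMI_FIFO_UNITS - 1)
			<< EX_RX_FIFO_PRI_ELEVATION_SHIFT) |
		(rx_fifo_thr / EX_BMI_FIFO_UNITS - 1);

	printf("fmbm_rfp = 0x%08x\n", (unsigned int)fmbm_rfp);	/* 0x001f0017 */
	return 0;
}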
(cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) << + BMI_FRAME_END_CS_IGNORE_SHIFT; + tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) << + BMI_RX_FRAME_END_CUT_SHIFT; + if (cfg->errata_A006320) + tmp &= 0xffe0ffff; + iowrite32be(tmp, ®s->fmbm_rfed); + + /* Internal context parameters */ + tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT; + tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT; + tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) & + BMI_IC_SIZE_MASK; + iowrite32be(tmp, ®s->fmbm_ricp); + + /* Internal buffer offset */ + tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) & + BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT; + iowrite32be(tmp, ®s->fmbm_rim); + + /* External buffer margins */ + tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) << + BMI_EXT_BUF_MARG_START_SHIFT; + tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK; + iowrite32be(tmp, ®s->fmbm_rebm); + + /* Frame attributes */ + tmp = BMI_CMD_RX_MR_DEF; + tmp |= BMI_CMD_ATTR_ORDER; + tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT; + /* Synchronization request */ + tmp |= BMI_CMD_ATTR_SYNC; + + iowrite32be(tmp, ®s->fmbm_rfca); + + /* NIA */ + tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT; + + tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; + iowrite32be(tmp, ®s->fmbm_rfne); + + /* Enqueue NIA */ + iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_rfene); + + /* Default/error queues */ + iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), ®s->fmbm_rfqid); + iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_refqid); + + /* Discard/error masks */ + iowrite32be(cfg->discard_mask, ®s->fmbm_rfsdm); + iowrite32be(cfg->err_mask, ®s->fmbm_rfsem); + + return 0; +} + +static int init_bmi_tx(struct fman_port *port) +{ + struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* Tx Configuration register */ + tmp = 0; + iowrite32be(tmp, ®s->fmbm_tcfg); + + /* DMA attributes */ + tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT; + iowrite32be(tmp, ®s->fmbm_tda); + + /* Tx FIFO parameters */ + tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) << + BMI_TX_FIFO_MIN_FILL_SHIFT; + tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) & + BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT; + tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1; + iowrite32be(tmp, ®s->fmbm_tfp); + + /* Frame end data */ + tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) << + BMI_FRAME_END_CS_IGNORE_SHIFT; + iowrite32be(tmp, ®s->fmbm_tfed); + + /* Internal context parameters */ + tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT; + tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) & + BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT; + tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) & + BMI_IC_SIZE_MASK; + iowrite32be(tmp, ®s->fmbm_ticp); + + /* Frame attributes */ + tmp = BMI_CMD_TX_MR_DEF; + tmp |= BMI_CMD_ATTR_ORDER; + tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT; + iowrite32be(tmp, ®s->fmbm_tfca); + + /* Dequeue NIA + enqueue NIA */ + iowrite32be(NIA_ENG_QMI_DEQ, ®s->fmbm_tfdne); + iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_tfene); + if (cfg->fmbm_tfne_has_features) + iowrite32be(!cfg->dflt_fqid ? 
+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME : + NIA_BMI_AC_FETCH_ALL_FRAME, ®s->fmbm_tfne); + if (!cfg->dflt_fqid && cfg->dont_release_buf) { + iowrite32be(DFLT_FQ_ID, ®s->fmbm_tcfqid); + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + ®s->fmbm_tfene); + if (cfg->fmbm_tfne_has_features) + iowrite32be(ioread32be(®s->fmbm_tfne) & ~BMI_EBD_EN, + ®s->fmbm_tfne); + } + + /* Confirmation/error queues */ + if (cfg->dflt_fqid || !cfg->dont_release_buf) + iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, ®s->fmbm_tcfqid); + iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_tefqid); + + return 0; +} + +static int init_qmi(struct fman_port *port) +{ + struct fman_port_qmi_regs __iomem *regs = port->qmi_regs; + struct fman_port_cfg *cfg = port->cfg; + u32 tmp; + + /* Rx port configuration */ + if (port->port_type == FMAN_PORT_TYPE_RX) { + /* Enqueue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, ®s->fmqm_pnen); + return 0; + } + + /* Continue with Tx port configuration */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + /* Enqueue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + ®s->fmqm_pnen); + /* Dequeue NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, ®s->fmqm_pndn); + } + + /* Dequeue Configuration register */ + tmp = 0; + if (cfg->deq_high_priority) + tmp |= QMI_DEQ_CFG_PRI; + + switch (cfg->deq_type) { + case FMAN_PORT_DEQ_BY_PRI: + tmp |= QMI_DEQ_CFG_TYPE1; + break; + case FMAN_PORT_DEQ_ACTIVE_FQ: + tmp |= QMI_DEQ_CFG_TYPE2; + break; + case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS: + tmp |= QMI_DEQ_CFG_TYPE3; + break; + default: + return -EINVAL; + } + + switch (cfg->deq_prefetch_option) { + case FMAN_PORT_DEQ_NO_PREFETCH: + break; + case FMAN_PORT_DEQ_PART_PREFETCH: + tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL; + break; + case FMAN_PORT_DEQ_FULL_PREFETCH: + tmp |= QMI_DEQ_CFG_PREFETCH_FULL; + break; + default: + return -EINVAL; + } + + tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT; + tmp |= cfg->deq_byte_cnt; + iowrite32be(tmp, ®s->fmqm_pndc); + + return 0; +} + +static int init(struct fman_port *port) +{ + int err; + + /* Init BMI registers */ + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + err = init_bmi_rx(port); + break; + case FMAN_PORT_TYPE_TX: + err = init_bmi_tx(port); + break; + default: + return -EINVAL; + } + + if (err) + return err; + + /* Init QMI registers */ + err = init_qmi(port); + return err; + + return 0; +} + +static int set_bpools(const struct fman_port *port, + const struct fman_port_bpools *bp) +{ + u32 __iomem *bp_reg, *bp_depl_reg; + u32 tmp; + u8 i, max_bp_num; + bool grp_depl_used = false, rx_port; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + max_bp_num = port->ext_pools_num; + rx_port = true; + bp_reg = port->bmi_regs->rx.fmbm_ebmpi; + bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd; + break; + default: + return -EINVAL; + } + + if (rx_port) { + /* Check buffers are provided in ascending order */ + for (i = 0; (i < (bp->count - 1) && + (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) { + if (bp->bpool[i].size > bp->bpool[i + 1].size) + return -EINVAL; + } + } + + /* Set up external buffers pools */ + for (i = 0; i < bp->count; i++) { + tmp = BMI_EXT_BUF_POOL_VALID; + tmp |= ((u32)bp->bpool[i].bpid << + BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK; + + if (rx_port) { + if (bp->counters_enable) + tmp |= BMI_EXT_BUF_POOL_EN_COUNTER; + + if (bp->bpool[i].is_backup) + tmp |= BMI_EXT_BUF_POOL_BACKUP; + + tmp |= (u32)bp->bpool[i].size; + } + + iowrite32be(tmp, &bp_reg[i]); + } + + /* Clear unused pools */ + for (i = bp->count; i < 
max_bp_num; i++) + iowrite32be(0, &bp_reg[i]); + + /* Pools depletion */ + tmp = 0; + for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) { + if (bp->bpool[i].grp_bp_depleted) { + grp_depl_used = true; + tmp |= 0x80000000 >> i; + } + + if (bp->bpool[i].single_bp_depleted) + tmp |= 0x80 >> i; + } + + if (grp_depl_used) + tmp |= ((u32)bp->grp_bp_depleted_num - 1) << + BMI_POOL_DEP_NUM_OF_POOLS_SHIFT; + + iowrite32be(tmp, bp_depl_reg); + return 0; +} + +static bool is_init_done(struct fman_port_cfg *cfg) +{ + /* Checks if FMan port driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +static int verify_size_of_fifo(struct fman_port *port) +{ + u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0; + + /* TX Ports */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + min_fifo_size_required = (u32) + (roundup(port->max_frame_length, + FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS)); + + min_fifo_size_required += + port->cfg->tx_fifo_deq_pipeline_depth * + FMAN_BMI_FIFO_UNITS; + + opt_fifo_size_for_b2b = min_fifo_size_required; + + /* Add some margin for back-to-back capability to improve + * performance, allows the hardware to pipeline new frame dma + * while the previous frame not yet transmitted. + */ + if (port->port_speed == 10000) + opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS; + else + opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS; + } + + /* RX Ports */ + else if (port->port_type == FMAN_PORT_TYPE_RX) { + if (port->rev_info.major >= 6) + min_fifo_size_required = (u32) + (roundup(port->max_frame_length, + FMAN_BMI_FIFO_UNITS) + + (5 * FMAN_BMI_FIFO_UNITS)); + /* 4 according to spec + 1 for FOF>0 */ + else + min_fifo_size_required = (u32) + (roundup(min(port->max_frame_length, + port->rx_pools_params.largest_buf_size), + FMAN_BMI_FIFO_UNITS) + + (7 * FMAN_BMI_FIFO_UNITS)); + + opt_fifo_size_for_b2b = min_fifo_size_required; + + /* Add some margin for back-to-back capability to improve + * performance,allows the hardware to pipeline new frame dma + * while the previous frame not yet transmitted. 
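set_bpools() above builds the pool-depletion word for FMBM_RMPD: bit (31 - i) marks pool i for the multi-pool (group) criterion, bit (7 - i) for the single-pool criterion, and the group pool count minus one is placed at the NUM_OF_POOLS shift. The sketch below shows the resulting word for a hypothetical choice of pools.

/* Illustrative pool-depletion word; pool indices and counts are hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define EX_POOL_DEP_NUM_OF_POOLS_SHIFT	16

int main(void)
{
	int grp_pools[]    = { 0, 2 };	/* pools in the group depletion criterion  */
	int single_pools[] = { 1 };	/* pools in the single depletion criterion */
	uint32_t grp_num   = 2;		/* pools that must deplete together        */
	uint32_t tmp = 0;
	size_t i;

	for (i = 0; i < sizeof(grp_pools) / sizeof(grp_pools[0]); i++)
		tmp |= 0x80000000u >> grp_pools[i];
	for (i = 0; i < sizeof(single_pools) / sizeof(single_pools[0]); i++)
		tmp |= 0x80u >> single_pools[i];

	tmp |= (grp_num - 1) << EX_POOL_DEP_NUM_OF_POOLS_SHIFT;

	printf("fmbm_mpd = 0x%08x\n", (unsigned int)tmp);	/* 0xa0010040 */
	return 0;
}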
+ */ + if (port->port_speed == 10000) + opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS; + else + opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS; + } + + WARN_ON(min_fifo_size_required <= 0); + WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required); + + /* Verify the size */ + if (port->fifo_bufs.num < min_fifo_size_required) + dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n", + __func__, min_fifo_size_required); + else if (port->fifo_bufs.num < opt_fifo_size_for_b2b) + dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n", + __func__, opt_fifo_size_for_b2b); + + return 0; +} + +static int set_ext_buffer_pools(struct fman_port *port) +{ + struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools; + struct fman_buf_pool_depletion *buf_pool_depletion = + &port->cfg->buf_pool_depletion; + u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM]; + u16 sizes_array[BM_MAX_NUM_OF_POOLS]; + int i = 0, j = 0, err; + struct fman_port_bpools bpools; + + memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM); + memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS); + memcpy(&port->ext_buf_pools, ext_buf_pools, + sizeof(struct fman_ext_pools)); + + fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools, + ordered_array, + sizes_array); + + memset(&bpools, 0, sizeof(struct fman_port_bpools)); + bpools.count = ext_buf_pools->num_of_pools_used; + bpools.counters_enable = true; + for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) { + bpools.bpool[i].bpid = ordered_array[i]; + bpools.bpool[i].size = sizes_array[ordered_array[i]]; + } + + /* save pools parameters for later use */ + port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used; + port->rx_pools_params.largest_buf_size = + sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]]; + port->rx_pools_params.second_largest_buf_size = + sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]]; + + /* FMBM_RMPD reg. - pool depletion */ + if (buf_pool_depletion->pools_grp_mode_enable) { + bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools; + for (i = 0; i < port->bm_max_num_of_pools; i++) { + if (buf_pool_depletion->pools_to_consider[i]) { + for (j = 0; j < ext_buf_pools-> + num_of_pools_used; j++) { + if (i == ordered_array[j]) { + bpools.bpool[j]. + grp_bp_depleted = true; + break; + } + } + } + } + } + + if (buf_pool_depletion->single_pool_mode_enable) { + for (i = 0; i < port->bm_max_num_of_pools; i++) { + if (buf_pool_depletion-> + pools_to_consider_for_single_mode[i]) { + for (j = 0; j < ext_buf_pools-> + num_of_pools_used; j++) { + if (i == ordered_array[j]) { + bpools.bpool[j]. + single_bp_depleted = true; + break; + } + } + } + } + } + + err = set_bpools(port, &bpools); + if (err != 0) { + dev_err(port->dev, "%s: set_bpools() failed\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int init_low_level_driver(struct fman_port *port) +{ + struct fman_port_cfg *cfg = port->cfg; + u32 tmp_val; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask); + break; + default: + break; + } + + tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ? 
+ (port->internal_buf_offset / OFFSET_UNITS + 1) : + (port->internal_buf_offset / OFFSET_UNITS)); + port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS); + port->cfg->int_buf_start_margin = port->internal_buf_offset; + + if (init(port) != 0) { + dev_err(port->dev, "%s: fman port initialization failed\n", + __func__); + return -ENODEV; + } + + /* The code bellow is a trick so the FM will not release the buffer + * to BM nor will try to enqueue the frame to QM + */ + if (port->port_type == FMAN_PORT_TYPE_TX) { + if (!cfg->dflt_fqid && cfg->dont_release_buf) { + /* override fmbm_tcfqid 0 with a false non-0 value. + * This will force FM to act according to tfene. + * Otherwise, if fmbm_tcfqid is 0 the FM will release + * buffers to BM regardless of fmbm_tfene + */ + iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid); + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, + &port->bmi_regs->tx.fmbm_tfene); + } + } + + return 0; +} + +static int fill_soc_specific_params(struct fman_port *port) +{ + u32 bmi_max_fifo_size; + + bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm); + port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size); + port->bm_max_num_of_pools = 64; + + /* P4080 - Major 2 + * P2041/P3041/P5020/P5040 - Major 3 + * Tx/Bx - Major 6 + */ + switch (port->rev_info.major) { + case 2: + case 3: + port->max_num_of_ext_pools = 4; + port->max_num_of_sub_portals = 12; + break; + + case 6: + port->max_num_of_ext_pools = 8; + port->max_num_of_sub_portals = 16; + break; + + default: + dev_err(port->dev, "%s: Unsupported FMan version\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + switch (speed) { + case 10000: + return 4; + case 1000: + if (major >= 6) + return 2; + else + return 1; + default: + return 0; + } + default: + return 0; + } +} + +static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + switch (speed) { + case 10000: + return 16; + case 1000: + if (major >= 6) + return 4; + else + return 3; + default: + return 0; + } + default: + return 0; + } +} + +static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type, + u16 speed) +{ + switch (type) { + case FMAN_PORT_TYPE_RX: + /* FMan V3 */ + if (major >= 6) + return 0; + + /* FMan V2 */ + if (speed == 10000) + return 8; + else + return 2; + case FMAN_PORT_TYPE_TX: + default: + return 0; + } +} + +static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type, + u16 speed) +{ + int val; + + if (major >= 6) { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 12; + else + val = 3; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 8; + else + val = 2; + break; + default: + return 0; + } + } else { + switch (type) { + case FMAN_PORT_TYPE_TX: + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 8; + else + val = 1; + break; + default: + val = 0; + } + } + + return val; +} + +static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type, + u16 speed) +{ + /* FMan V3 */ + if (major >= 6) + return 0; + + /* FMan V2 */ + switch (type) { + case FMAN_PORT_TYPE_RX: + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + return 8; + else + return 1; + default: + return 0; + } +} + +static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type, + u16 speed) +{ + int val; + + if 
(major >= 6) { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 64; + else + val = 50; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 96; + else + val = 50; + break; + default: + val = 0; + } + } else { + switch (type) { + case FMAN_PORT_TYPE_TX: + if (speed == 10000) + val = 48; + else + val = 44; + break; + case FMAN_PORT_TYPE_RX: + if (speed == 10000) + val = 48; + else + val = 45; + break; + default: + val = 0; + } + } + + return val; +} + +static void set_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params) +{ + struct fman_port_cfg *cfg = port->cfg; + + cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP; + cfg->color = FMAN_PORT_COLOR_GREEN; + cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END; + cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL; + cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD; + cfg->tx_fifo_low_comf_level = (5 * 1024); + cfg->deq_type = FMAN_PORT_DEQ_BY_PRI; + cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH; + cfg->tx_fifo_deq_pipeline_depth = + BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed); + cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type); + + cfg->rx_pri_elevation = + DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size); + port->cfg->rx_fifo_thr = + DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major, + port->max_port_fifo_size); + + if ((port->rev_info.major == 6) && + ((port->rev_info.minor == 0) || (port->rev_info.minor == 3))) + cfg->errata_A006320 = true; + + /* Excessive Threshold register - exists for pre-FMv3 chips only */ + if (port->rev_info.major < 6) + cfg->excessive_threshold_register = true; + else + cfg->fmbm_tfne_has_features = true; + + cfg->buffer_prefix_content.data_align = + DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN; +} + +static void set_rx_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params) +{ + port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD; + + memcpy(&port->cfg->ext_buf_pools, + &port_params->specific_params.rx_params.ext_buf_pools, + sizeof(struct fman_ext_pools)); + port->cfg->err_fqid = + port_params->specific_params.rx_params.err_fqid; + port->cfg->dflt_fqid = + port_params->specific_params.rx_params.dflt_fqid; +} + +static void set_tx_dflt_cfg(struct fman_port *port, + struct fman_port_params *port_params, + struct fman_port_dts_params *dts_params) +{ + port->cfg->tx_fifo_deq_pipeline_depth = + get_dflt_fifo_deq_pipeline_depth(port->rev_info.major, + port->port_type, + port->port_speed); + port->cfg->err_fqid = + port_params->specific_params.non_rx_params.err_fqid; + port->cfg->deq_sp = + (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK); + port->cfg->dflt_fqid = + port_params->specific_params.non_rx_params.dflt_fqid; + port->cfg->deq_high_priority = true; +} + +/** + * fman_port_config + * @port: Pointer to the port structure + * @params: Pointer to data structure of parameters + * + * Creates a descriptor for the FM PORT module. + * The routine returns a pointer to the FM PORT object. + * This descriptor must be passed as first parameter to all other FM PORT + * function calls. + * No actual initialization or configuration of FM hardware is done by this + * routine. + * + * Return: 0 on success; Error code otherwise. 
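 *
 * A minimal usage sketch (hypothetical caller; the queue IDs below are
 * placeholders, not values defined by this driver):
 *
 *	struct fman_port *port = fman_port_bind(dev);
 *	struct fman_port_params params;
 *	int err;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.specific_params.rx_params.err_fqid = 0x101;
 *	params.specific_params.rx_params.dflt_fqid = 0x100;
 *	err = fman_port_config(port, &params);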
+ */ +int fman_port_config(struct fman_port *port, struct fman_port_params *params) +{ + void __iomem *base_addr = port->dts_params.base_addr; + int err; + + /* Allocate the FM driver's parameters structure */ + port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL); + if (!port->cfg) + goto err_params; + + /* Initialize FM port parameters which will be kept by the driver */ + port->port_type = port->dts_params.type; + port->port_speed = port->dts_params.speed; + port->port_id = port->dts_params.id; + port->fm = port->dts_params.fman; + port->ext_pools_num = (u8)8; + + /* get FM revision */ + fman_get_revision(port->fm, &port->rev_info); + + err = fill_soc_specific_params(port); + if (err) + goto err_port_cfg; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + set_rx_dflt_cfg(port, params); + case FMAN_PORT_TYPE_TX: + set_tx_dflt_cfg(port, params, &port->dts_params); + default: + set_dflt_cfg(port, params); + } + + /* Continue with other parameters */ + /* set memory map pointers */ + port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET; + port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET; + + port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH; + /* resource distribution. */ + + port->fifo_bufs.num = + get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type, + port->port_speed) * FMAN_BMI_FIFO_UNITS; + port->fifo_bufs.extra = + DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS; + + port->open_dmas.num = + get_dflt_num_of_open_dmas(port->rev_info.major, + port->port_type, port->port_speed); + port->open_dmas.extra = + get_dflt_extra_num_of_open_dmas(port->rev_info.major, + port->port_type, port->port_speed); + port->tasks.num = + get_dflt_num_of_tasks(port->rev_info.major, + port->port_type, port->port_speed); + port->tasks.extra = + get_dflt_extra_num_of_tasks(port->rev_info.major, + port->port_type, port->port_speed); + + /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata + * workaround + */ + if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) && + (((port->port_type == FMAN_PORT_TYPE_TX) && + (port->port_speed == 1000)))) { + port->open_dmas.num = 16; + port->open_dmas.extra = 0; + } + + if (port->rev_info.major >= 6 && + port->port_type == FMAN_PORT_TYPE_TX && + port->port_speed == 1000) { + /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata + * workaround + */ + if (port->rev_info.major >= 6) { + u32 reg; + + reg = 0x00001013; + iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); + } + } + + return 0; + +err_port_cfg: + kfree(port->cfg); +err_params: + kfree(port); + return -EINVAL; +} +EXPORT_SYMBOL(fman_port_config); + +/** + * fman_port_init + * port: A pointer to a FM Port module. + * Initializes the FM PORT module by defining the software structure and + * configuring the hardware registers. + * + * Return: 0 on success; Error code otherwise. 
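 *
 * Taken together with the other routines in this file, the intended call
 * order is roughly the following (sketch only; error handling abridged):
 *
 *	err = fman_port_config(port, &params);
 *	if (!err)
 *		err = fman_port_cfg_buf_prefix_content(port, &prefix);
 *	if (!err)
 *		err = fman_port_init(port);
 *	if (!err)
 *		err = fman_port_enable(port);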
+ */
+int fman_port_init(struct fman_port *port)
+{
+	struct fman_port_cfg *cfg;
+	int err;
+	struct fman_port_init_params params;
+
+	if (is_init_done(port->cfg))
+		return -EINVAL;
+
+	err = fman_sp_build_buffer_struct(&port->cfg->int_context,
+					  &port->cfg->buffer_prefix_content,
+					  &port->cfg->buf_margins,
+					  &port->buffer_offsets,
+					  &port->internal_buf_offset);
+	if (err)
+		return err;
+
+	cfg = port->cfg;
+
+	if (port->port_type == FMAN_PORT_TYPE_RX) {
+		/* Call the external Buffer routine which also checks fifo
+		 * size and updates it if necessary
+		 */
+		/* define external buffer pools and pool depletion */
+		err = set_ext_buffer_pools(port);
+		if (err)
+			return err;
+		/* check if the largest external buffer pool is large enough */
+		if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
+		    cfg->buf_margins.end_margins >
+		    port->rx_pools_params.largest_buf_size) {
+			dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+				__func__, cfg->buf_margins.start_margins,
+				cfg->buf_margins.end_margins,
+				port->rx_pools_params.largest_buf_size);
+			return -EINVAL;
+		}
+	}
+
+	/* Call FM module routine for communicating parameters */
+	memset(&params, 0, sizeof(params));
+	params.port_id = port->port_id;
+	params.port_type = port->port_type;
+	params.port_speed = port->port_speed;
+	params.num_of_tasks = (u8)port->tasks.num;
+	params.num_of_extra_tasks = (u8)port->tasks.extra;
+	params.num_of_open_dmas = (u8)port->open_dmas.num;
+	params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
+
+	if (port->fifo_bufs.num) {
+		err = verify_size_of_fifo(port);
+		if (err)
+			return err;
+	}
+	params.size_of_fifo = port->fifo_bufs.num;
+	params.extra_size_of_fifo = port->fifo_bufs.extra;
+	params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
+	params.max_frame_length = port->max_frame_length;
+
+	err = fman_set_port_params(port->fm, &params);
+	if (err)
+		return err;
+
+	err = init_low_level_driver(port);
+	if (err)
+		return err;
+
+	kfree(port->cfg);
+	port->cfg = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(fman_port_init);
+
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
+ *			   the structure of the buffer.
+ *			   Out parameter:
+ *			   Start margin - offset of data from
+ *			   start of external buffer.
+ * Defines the structure, size and content of the application buffer.
+ * The FM writes the first 'priv_data_size' bytes and then, depending on
+ * 'pass_prs_result' and 'pass_time_stamp', the parse result and the
+ * timestamp, followed by the packet itself (in this order), into the
+ * application buffer at the corresponding offsets. On Tx ports, if
+ * 'pass_prs_result' is set, the application is expected to fill in the
+ * parse result at its offset in the prefix.
+ * Calling this routine changes the buffer margins definitions in the
+ * internal driver database from its default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
+ * May be used for all ports.
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
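 *
 * A minimal sketch of a caller (field names as used by fman_sp.c in this
 * patch; the sizes are arbitrary example values):
 *
 *	struct fman_buffer_prefix_content prefix;
 *
 *	memset(&prefix, 0, sizeof(prefix));
 *	prefix.priv_data_size = 16;
 *	prefix.pass_prs_result = true;
 *	prefix.data_align = 64;
 *	err = fman_port_cfg_buf_prefix_content(port, &prefix);
 *
 * Leaving data_align at zero falls back to the driver default, as the
 * function body below shows.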
+ */ +int fman_port_cfg_buf_prefix_content(struct fman_port *port, + struct fman_buffer_prefix_content * + buffer_prefix_content) +{ + if (is_init_done(port->cfg)) + return -EINVAL; + + memcpy(&port->cfg->buffer_prefix_content, + buffer_prefix_content, + sizeof(struct fman_buffer_prefix_content)); + /* if data_align was not initialized by user, + * we return to driver's default + */ + if (!port->cfg->buffer_prefix_content.data_align) + port->cfg->buffer_prefix_content.data_align = + DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN; + + return 0; +} +EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content); + +/** + * fman_port_disable + * port: A pointer to a FM Port module. + * + * Gracefully disable an FM port. The port will not start new tasks after all + * tasks associated with the port are terminated. + * + * This is a blocking routine, it returns after port is gracefully stopped, + * i.e. the port will not except new frames, but it will finish all frames + * or tasks which were already began. + * Allowed only following fman_port_init(). + * + * Return: 0 on success; Error code otherwise. + */ +int fman_port_disable(struct fman_port *port) +{ + u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp; + bool rx_port, failure = false; + int count; + + if (!is_init_done(port->cfg)) + return -EINVAL; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg; + bmi_status_reg = &port->bmi_regs->rx.fmbm_rst; + rx_port = true; + break; + case FMAN_PORT_TYPE_TX: + bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg; + bmi_status_reg = &port->bmi_regs->tx.fmbm_tst; + rx_port = false; + break; + default: + return -EINVAL; + } + + /* Disable QMI */ + if (!rx_port) { + tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN; + iowrite32be(tmp, &port->qmi_regs->fmqm_pnc); + + /* Wait for QMI to finish FD handling */ + count = 100; + do { + udelay(10); + tmp = ioread32be(&port->qmi_regs->fmqm_pns); + } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count); + + if (count == 0) { + /* Timeout */ + failure = true; + } + } + + /* Disable BMI */ + tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN; + iowrite32be(tmp, bmi_cfg_reg); + + /* Wait for graceful stop end */ + count = 500; + do { + udelay(10); + tmp = ioread32be(bmi_status_reg); + } while ((tmp & BMI_PORT_STATUS_BSY) && --count); + + if (count == 0) { + /* Timeout */ + failure = true; + } + + if (failure) + dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n", + __func__, port->port_id); + + return 0; +} +EXPORT_SYMBOL(fman_port_disable); + +/** + * fman_port_enable + * port: A pointer to a FM Port module. + * + * A runtime routine provided to allow disable/enable of port. + * + * Allowed only following fman_port_init(). + * + * Return: 0 on success; Error code otherwise. 
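 *
 * fman_port_enable() is expected to pair with fman_port_disable(), e.g.
 * around a runtime stop/start (sketch only):
 *
 *	fman_port_disable(port);
 *	...	(caller-specific quiesce or reconfiguration)
 *	fman_port_enable(port);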
+ */ +int fman_port_enable(struct fman_port *port) +{ + u32 __iomem *bmi_cfg_reg, tmp; + bool rx_port; + + if (!is_init_done(port->cfg)) + return -EINVAL; + + switch (port->port_type) { + case FMAN_PORT_TYPE_RX: + bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg; + rx_port = true; + break; + case FMAN_PORT_TYPE_TX: + bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg; + rx_port = false; + break; + default: + return -EINVAL; + } + + /* Enable QMI */ + if (!rx_port) { + tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN; + iowrite32be(tmp, &port->qmi_regs->fmqm_pnc); + } + + /* Enable BMI */ + tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN; + iowrite32be(tmp, bmi_cfg_reg); + + return 0; +} +EXPORT_SYMBOL(fman_port_enable); + +/** + * fman_port_bind + * dev: FMan Port OF device pointer + * + * Bind to a specific FMan Port. + * + * Allowed only after the port was created. + * + * Return: A pointer to the FMan port device. + */ +struct fman_port *fman_port_bind(struct device *dev) +{ + return (struct fman_port *)(dev_get_drvdata(get_device(dev))); +} +EXPORT_SYMBOL(fman_port_bind); + +/** + * fman_port_get_qman_channel_id + * port: Pointer to the FMan port devuce + * + * Get the QMan channel ID for the specific port + * + * Return: QMan channel ID + */ +u32 fman_port_get_qman_channel_id(struct fman_port *port) +{ + return port->dts_params.qman_channel_id; +} +EXPORT_SYMBOL(fman_port_get_qman_channel_id); + +static int fman_port_probe(struct platform_device *of_dev) +{ + struct fman_port *port; + struct fman *fman; + struct device_node *fm_node, *port_node; + struct resource res; + struct resource *dev_res; + const u32 *u32_prop; + int err = 0, lenp; + enum fman_port_type port_type; + u16 port_speed; + u8 port_id; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->dev = &of_dev->dev; + + port_node = of_node_get(of_dev->dev.of_node); + + /* Get the FM node */ + fm_node = of_get_parent(port_node); + if (!fm_node) { + dev_err(port->dev, "%s: of_get_parent() failed\n", __func__); + err = -ENODEV; + goto return_err; + } + + fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + of_node_put(fm_node); + if (!fman) { + err = -EINVAL; + goto return_err; + } + + u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n", + __func__, port_node->full_name); + err = -EINVAL; + goto return_err; + } + if (WARN_ON(lenp != sizeof(u32))) { + err = -EINVAL; + goto return_err; + } + port_id = (u8)fdt32_to_cpu(u32_prop[0]); + + port->dts_params.id = port_id; + + if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) { + port_type = FMAN_PORT_TYPE_TX; + port_speed = 1000; + u32_prop = (const u32 *)of_get_property(port_node, + "fsl,fman-10g-port", + &lenp); + if (u32_prop) + port_speed = 10000; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) { + if (port_id >= TX_10G_PORT_BASE) + port_speed = 10000; + else + port_speed = 1000; + port_type = FMAN_PORT_TYPE_TX; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) { + port_type = FMAN_PORT_TYPE_RX; + port_speed = 1000; + u32_prop = (const u32 *)of_get_property(port_node, + "fsl,fman-10g-port", &lenp); + if (u32_prop) + port_speed = 10000; + + } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) { + if (port_id >= RX_10G_PORT_BASE) + port_speed = 10000; + else + port_speed = 1000; + port_type = FMAN_PORT_TYPE_RX; + + } else { + dev_err(port->dev, "%s: 
Illegal port type\n", __func__); + err = -EINVAL; + goto return_err; + } + + port->dts_params.type = port_type; + port->dts_params.speed = port_speed; + + if (port_type == FMAN_PORT_TYPE_TX) { + u32 qman_channel_id; + + qman_channel_id = fman_get_qman_channel_id(fman, port_id); + if (qman_channel_id == 0) { + dev_err(port->dev, "%s: incorrect qman-channel-id\n", + __func__); + err = -EINVAL; + goto return_err; + } + port->dts_params.qman_channel_id = qman_channel_id; + } + + err = of_address_to_resource(port_node, 0, &res); + if (err < 0) { + dev_err(port->dev, "%s: of_address_to_resource() failed\n", + __func__); + err = -ENOMEM; + goto return_err; + } + + port->dts_params.fman = fman; + + of_node_put(port_node); + + dev_res = __devm_request_region(port->dev, &res, res.start, + resource_size(&res), "fman-port"); + if (!dev_res) { + dev_err(port->dev, "%s: __devm_request_region() failed\n", + __func__); + err = -EINVAL; + goto free_port; + } + + port->dts_params.base_addr = devm_ioremap(port->dev, res.start, + resource_size(&res)); + if (port->dts_params.base_addr == 0) + dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__); + + dev_set_drvdata(&of_dev->dev, port); + + return 0; + +return_err: + of_node_put(port_node); +free_port: + kfree(port); + return err; +} + +static const struct of_device_id fman_port_match[] = { + {.compatible = "fsl,fman-v3-port-rx"}, + {.compatible = "fsl,fman-v2-port-rx"}, + {.compatible = "fsl,fman-v3-port-tx"}, + {.compatible = "fsl,fman-v2-port-tx"}, + {} +}; + +MODULE_DEVICE_TABLE(of, fman_port_match); + +static struct platform_driver fman_port_driver = { + .driver = { + .name = "fsl-fman-port", + .of_match_table = fman_port_match, + }, + .probe = fman_port_probe, +}; + +builtin_platform_driver(fman_port_driver); diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h new file mode 100644 index 000000000000..8ba901737048 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -0,0 +1,151 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FMAN_PORT_H +#define __FMAN_PORT_H + +#include "fman.h" + +/* FM Port API + * The FM uses a general module called "port" to represent a Tx port (MAC), + * an Rx port (MAC). + * The number of ports in an FM varies between SOCs. + * The SW driver manages these ports as sub-modules of the FM,i.e. after an + * FM is initialized, its ports may be initialized and operated upon. + * The port is initialized aware of its type, but other functions on a port + * may be indifferent to its type. When necessary, the driver verifies + * coherence and returns error if applicable. + * On initialization, user specifies the port type and it's index (relative + * to the port's type) - always starting at 0. + */ + +/* FM Frame error */ +/* Frame Descriptor errors */ +/* Not for Rx-Port! Unsupported Format */ +#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT +/* Not for Rx-Port! Length Error */ +#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH +/* DMA Data error */ +#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA +/* non Frame-Manager error; probably come from SEC that was chained to FM */ +#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM + /* IPR error */ +#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) +/* IPR non-consistent-sp */ +#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \ + ~FM_FD_IPR) + +/* Rx FIFO overflow, FCS error, code error, running disparity + * error (SGMII and TBI modes), FIFO parity error. + * PHY Sequence error, PHY error control character detected. + */ +#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL +/* Frame too long OR Frame size exceeds max_length_frame */ +#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE +/* indicates a classifier "drop" operation */ +#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD +/* Extract Out of Frame */ +#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION +/* No Scheme Selected */ +#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME +/* Keysize Overflow */ +#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW +/* Frame color is red */ +#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED +/* Frame color is yellow */ +#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW +/* Parser Time out Exceed */ +#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT +/* Invalid Soft Parser instruction */ +#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT +/* Header error was identified during parsing */ +#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR +/* Frame parsed beyind 256 first bytes */ +#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED +/* FPM Frame Processing Timeout Exceeded */ +#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001 + +struct fman_port; + +/* A structure for additional Rx port parameters */ +struct fman_port_rx_params { + u32 err_fqid; /* Error Queue Id. */ + u32 dflt_fqid; /* Default Queue Id. 
*/ + /* Which external buffer pools are used + * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes. + */ + struct fman_ext_pools ext_buf_pools; +}; + +/* A structure for additional non-Rx port parameters */ +struct fman_port_non_rx_params { + /* Error Queue Id. */ + u32 err_fqid; + /* For Tx - Default Confirmation queue, 0 means no Tx confirmation + * for processed frames. For OP port - default Rx queue. + */ + u32 dflt_fqid; +}; + +/* A union for additional parameters depending on port type */ +union fman_port_specific_params { + /* Rx port parameters structure */ + struct fman_port_rx_params rx_params; + /* Non-Rx port parameters structure */ + struct fman_port_non_rx_params non_rx_params; +}; + +/* A structure representing FM initialization parameters */ +struct fman_port_params { + /* Virtual Address of memory mapped FM Port registers. */ + void *fm; + union fman_port_specific_params specific_params; + /* Additional parameters depending on port type. */ +}; + +int fman_port_config(struct fman_port *port, struct fman_port_params *params); + +int fman_port_init(struct fman_port *port); + +int fman_port_cfg_buf_prefix_content(struct fman_port *port, + struct fman_buffer_prefix_content + *buffer_prefix_content); + +int fman_port_disable(struct fman_port *port); + +int fman_port_enable(struct fman_port *port); + +u32 fman_port_get_qman_channel_id(struct fman_port *port); + +struct fman_port *fman_port_bind(struct device *dev); + +#endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c new file mode 100644 index 000000000000..f9e7aa385cba --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_sp.c @@ -0,0 +1,166 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "fman_sp.h" +#include "fman.h" + +void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools + *fm_ext_pools, + u8 *ordered_array, + u16 *sizes_array) +{ + u16 buf_size = 0; + int i = 0, j = 0, k = 0; + + /* First we copy the external buffers pools information + * to an ordered local array + */ + for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) { + /* get pool size */ + buf_size = fm_ext_pools->ext_buf_pool[i].size; + + /* keep sizes in an array according to poolId + * for direct access + */ + sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size; + + /* save poolId in an ordered array according to size */ + for (j = 0; j <= i; j++) { + /* this is the next free place in the array */ + if (j == i) + ordered_array[i] = + fm_ext_pools->ext_buf_pool[i].id; + else { + /* find the right place for this poolId */ + if (buf_size < sizes_array[ordered_array[j]]) { + /* move the pool_ids one place ahead + * to make room for this poolId + */ + for (k = i; k > j; k--) + ordered_array[k] = + ordered_array[k - 1]; + + /* now k==j, this is the place for + * the new size + */ + ordered_array[k] = + fm_ext_pools->ext_buf_pool[i].id; + break; + } + } + } + } +} + +int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy * + int_context_data_copy, + struct fman_buffer_prefix_content * + buffer_prefix_content, + struct fman_sp_buf_margins *buf_margins, + struct fman_sp_buffer_offsets *buffer_offsets, + u8 *internal_buf_offset) +{ + u32 tmp; + + /* Align start of internal context data to 16 byte */ + int_context_data_copy->ext_buf_offset = (u16) + ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ? + ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) & + ~(u16)(OFFSET_UNITS - 1)) : + buffer_prefix_content->priv_data_size); + + /* Translate margin and int_context params to FM parameters */ + /* Initialize with illegal value. Later we'll set legal values. */ + buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE; + buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE; + buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE; + + /* Internally the driver supports 4 options + * 1. prsResult/timestamp/hashResult selection (in fact 8 options, + * but for simplicity we'll + * relate to it as 1). + * 2. All IC context (from AD) not including debug. + */ + + /* This case covers the options under 1 */ + /* Copy size must be in 16-byte granularity. */ + int_context_data_copy->size = + (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) + + ((buffer_prefix_content->pass_time_stamp || + buffer_prefix_content->pass_hash_result) ? 16 : 0)); + + /* Align start of internal context data to 16 byte */ + int_context_data_copy->int_context_offset = + (u8)(buffer_prefix_content->pass_prs_result ? 32 : + ((buffer_prefix_content->pass_time_stamp || + buffer_prefix_content->pass_hash_result) ? 64 : 0)); + + if (buffer_prefix_content->pass_prs_result) + buffer_offsets->prs_result_offset = + int_context_data_copy->ext_buf_offset; + if (buffer_prefix_content->pass_time_stamp) + buffer_offsets->time_stamp_offset = + buffer_prefix_content->pass_prs_result ? + (int_context_data_copy->ext_buf_offset + + sizeof(struct fman_prs_result)) : + int_context_data_copy->ext_buf_offset; + if (buffer_prefix_content->pass_hash_result) + /* If PR is not requested, whether TS is + * requested or not, IC will be copied from TS + */ + buffer_offsets->hash_result_offset = + buffer_prefix_content->pass_prs_result ? 
+ (int_context_data_copy->ext_buf_offset + + sizeof(struct fman_prs_result) + 8) : + int_context_data_copy->ext_buf_offset + 8; + + if (int_context_data_copy->size) + buf_margins->start_margins = + (u16)(int_context_data_copy->ext_buf_offset + + int_context_data_copy->size); + else + /* No Internal Context passing, STartMargin is + * immediately after private_info + */ + buf_margins->start_margins = + buffer_prefix_content->priv_data_size; + + /* align data start */ + tmp = (u32)(buf_margins->start_margins % + buffer_prefix_content->data_align); + if (tmp) + buf_margins->start_margins += + (buffer_prefix_content->data_align - tmp); + buffer_offsets->data_offset = buf_margins->start_margins; + + return 0; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h new file mode 100644 index 000000000000..820b7f63088f --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_sp.h @@ -0,0 +1,103 @@ +/* + * Copyright 2008 - 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __FM_SP_H +#define __FM_SP_H + +#include "fman.h" +#include <linux/types.h> + +#define ILLEGAL_BASE (~0) + +/* defaults */ +#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64 + +/* Registers bit fields */ +#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000 +#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000 +#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000 +#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000 +#define FMAN_SP_SG_DISABLE 0x80000000 + +/* shifts */ +#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16 +#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30 +#define FMAN_SP_IC_TO_EXT_SHIFT 16 +#define FMAN_SP_IC_FROM_INT_SHIFT 8 + +/* structure for defining internal context copying */ +struct fman_sp_int_context_data_copy { + /* < Offset in External buffer to which internal + * context is copied to (Rx) or taken from (Tx, Op). + */ + u16 ext_buf_offset; + /* Offset within internal context to copy from + * (Rx) or to copy to (Tx, Op). + */ + u8 int_context_offset; + /* Internal offset size to be copied */ + u16 size; +}; + +/* struct for defining external buffer margins */ +struct fman_sp_buf_margins { + /* Number of bytes to be left at the beginning + * of the external buffer (must be divisible by 16) + */ + u16 start_margins; + /* number of bytes to be left at the end + * of the external buffer(must be divisible by 16) + */ + u16 end_margins; +}; + +struct fman_sp_buffer_offsets { + u32 data_offset; + u32 prs_result_offset; + u32 time_stamp_offset; + u32 hash_result_offset; +}; + +int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy + *int_context_data_copy, + struct fman_buffer_prefix_content + *buffer_prefix_content, + struct fman_sp_buf_margins *buf_margins, + struct fman_sp_buffer_offsets + *buffer_offsets, + u8 *internal_buf_offset); + +void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools + *fm_ext_pools, + u8 *ordered_array, + u16 *sizes_array); + +#endif /* __FM_SP_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c new file mode 100644 index 000000000000..efabb04a1ae8 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -0,0 +1,786 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "fman_tgec.h" +#include "fman.h" + +#include <linux/slab.h> +#include <linux/bitrev.h> +#include <linux/io.h> +#include <linux/crc32.h> + +/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */ +#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff + +/* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_NO_LEN_CHK 0x00020000 +#define CMD_CFG_PAUSE_IGNORE 0x00000100 +#define CMF_CFG_CRC_FWD 0x00000040 +#define CMD_CFG_PROMIS_EN 0x00000010 +#define CMD_CFG_RX_EN 0x00000002 +#define CMD_CFG_TX_EN 0x00000001 + +/* Interrupt Mask Register (IMASK) */ +#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000 +#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000 +#define TGEC_IMASK_REM_FAULT 0x00004000 +#define TGEC_IMASK_LOC_FAULT 0x00002000 +#define TGEC_IMASK_TX_ECC_ER 0x00001000 +#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800 +#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400 +#define TGEC_IMASK_TX_ER 0x00000200 +#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100 +#define TGEC_IMASK_RX_ECC_ER 0x00000080 +#define TGEC_IMASK_RX_JAB_FRM 0x00000040 +#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020 +#define TGEC_IMASK_RX_RUNT_FRM 0x00000010 +#define TGEC_IMASK_RX_FRAG_FRM 0x00000008 +#define TGEC_IMASK_RX_LEN_ER 0x00000004 +#define TGEC_IMASK_RX_CRC_ER 0x00000002 +#define TGEC_IMASK_RX_ALIGN_ER 0x00000001 + +/* Hashtable Control Register (HASHTABLE_CTRL) */ +#define TGEC_HASH_MCAST_SHIFT 23 +#define TGEC_HASH_MCAST_EN 0x00000200 +#define TGEC_HASH_ADR_MSK 0x000001ff + +#define DEFAULT_TX_IPG_LENGTH 12 +#define DEFAULT_MAX_FRAME_LENGTH 0x600 +#define DEFAULT_PAUSE_QUANT 0xf000 + +/* number of pattern match registers (entries) */ +#define TGEC_NUM_OF_PADDRS 1 + +/* Group address bit indication */ +#define GROUP_ADDRESS 0x0000010000000000LL + +/* Hash table size (= 32 bits*8 regs) */ +#define TGEC_HASH_TABLE_SIZE 512 + +/* tGEC memory map */ +struct tgec_regs { + u32 tgec_id; /* 0x000 Controller ID */ + u32 reserved001[1]; /* 0x004 */ + u32 command_config; /* 0x008 Control and configuration */ + u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */ + u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */ + u32 maxfrm; /* 0x014 Maximum frame length */ + u32 pause_quant; /* 0x018 Pause quanta */ + u32 rx_fifo_sections; /* 0x01c */ + u32 tx_fifo_sections; /* 0x020 */ + u32 rx_fifo_almost_f_e; /* 0x024 */ + u32 tx_fifo_almost_f_e; /* 0x028 */ + u32 hashtable_ctrl; /* 0x02c Hash table control */ + u32 mdio_cfg_status; /* 0x030 */ + u32 mdio_command; /* 0x034 */ + u32 mdio_data; /* 0x038 */ + u32 mdio_regaddr; /* 0x03c */ + u32 status; /* 0x040 */ + u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */ + u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */ + u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */ + u32 rx_fifo_ptr_rd; /* 0x050 */ + u32 rx_fifo_ptr_wr; /* 0x054 */ + u32 tx_fifo_ptr_rd; /* 0x058 */ + u32 tx_fifo_ptr_wr; /* 0x05c */ + u32 imask; /* 0x060 Interrupt mask */ + u32 ievent; /* 0x064 Interrupt event */ + u32 
udp_port; /* 0x068 Defines a UDP Port number */ + u32 type_1588v2; /* 0x06c Type field for 1588v2 */ + u32 reserved070[4]; /* 0x070 */ + /* 10Ge Statistics Counter */ + u32 tfrm_u; /* 80 aFramesTransmittedOK */ + u32 tfrm_l; /* 84 aFramesTransmittedOK */ + u32 rfrm_u; /* 88 aFramesReceivedOK */ + u32 rfrm_l; /* 8c aFramesReceivedOK */ + u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */ + u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */ + u32 raln_u; /* 98 aAlignmentErrors */ + u32 raln_l; /* 9c aAlignmentErrors */ + u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */ + u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */ + u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */ + u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */ + u32 rlong_u; /* B0 aFrameTooLongErrors */ + u32 rlong_l; /* B4 aFrameTooLongErrors */ + u32 rflr_u; /* B8 aInRangeLengthErrors */ + u32 rflr_l; /* Bc aInRangeLengthErrors */ + u32 tvlan_u; /* C0 VLANTransmittedOK */ + u32 tvlan_l; /* C4 VLANTransmittedOK */ + u32 rvlan_u; /* C8 VLANReceivedOK */ + u32 rvlan_l; /* Cc VLANReceivedOK */ + u32 toct_u; /* D0 if_out_octets */ + u32 toct_l; /* D4 if_out_octets */ + u32 roct_u; /* D8 if_in_octets */ + u32 roct_l; /* Dc if_in_octets */ + u32 ruca_u; /* E0 if_in_ucast_pkts */ + u32 ruca_l; /* E4 if_in_ucast_pkts */ + u32 rmca_u; /* E8 ifInMulticastPkts */ + u32 rmca_l; /* Ec ifInMulticastPkts */ + u32 rbca_u; /* F0 ifInBroadcastPkts */ + u32 rbca_l; /* F4 ifInBroadcastPkts */ + u32 terr_u; /* F8 if_out_errors */ + u32 terr_l; /* Fc if_out_errors */ + u32 reserved100[2]; /* 100-108 */ + u32 tuca_u; /* 108 if_out_ucast_pkts */ + u32 tuca_l; /* 10c if_out_ucast_pkts */ + u32 tmca_u; /* 110 ifOutMulticastPkts */ + u32 tmca_l; /* 114 ifOutMulticastPkts */ + u32 tbca_u; /* 118 ifOutBroadcastPkts */ + u32 tbca_l; /* 11c ifOutBroadcastPkts */ + u32 rdrp_u; /* 120 etherStatsDropEvents */ + u32 rdrp_l; /* 124 etherStatsDropEvents */ + u32 reoct_u; /* 128 etherStatsOctets */ + u32 reoct_l; /* 12c etherStatsOctets */ + u32 rpkt_u; /* 130 etherStatsPkts */ + u32 rpkt_l; /* 134 etherStatsPkts */ + u32 trund_u; /* 138 etherStatsUndersizePkts */ + u32 trund_l; /* 13c etherStatsUndersizePkts */ + u32 r64_u; /* 140 etherStatsPkts64Octets */ + u32 r64_l; /* 144 etherStatsPkts64Octets */ + u32 r127_u; /* 148 etherStatsPkts65to127Octets */ + u32 r127_l; /* 14c etherStatsPkts65to127Octets */ + u32 r255_u; /* 150 etherStatsPkts128to255Octets */ + u32 r255_l; /* 154 etherStatsPkts128to255Octets */ + u32 r511_u; /* 158 etherStatsPkts256to511Octets */ + u32 r511_l; /* 15c etherStatsPkts256to511Octets */ + u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */ + u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */ + u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */ + u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */ + u32 r1519x_u; /* 170 etherStatsPkts1519toX */ + u32 r1519x_l; /* 174 etherStatsPkts1519toX */ + u32 trovr_u; /* 178 etherStatsOversizePkts */ + u32 trovr_l; /* 17c etherStatsOversizePkts */ + u32 trjbr_u; /* 180 etherStatsJabbers */ + u32 trjbr_l; /* 184 etherStatsJabbers */ + u32 trfrg_u; /* 188 etherStatsFragments */ + u32 trfrg_l; /* 18C etherStatsFragments */ + u32 rerr_u; /* 190 if_in_errors */ + u32 rerr_l; /* 194 if_in_errors */ +}; + +struct tgec_cfg { + bool pause_ignore; + bool promiscuous_mode_enable; + u16 max_frame_length; + u16 pause_quant; + u32 tx_ipg_length; +}; + +struct fman_mac { + /* Pointer to the memory mapped registers. 
*/ + struct tgec_regs __iomem *regs; + /* MAC address of device; */ + u64 addr; + u16 max_speed; + void *dev_id; /* device cookie used by the exception cbs */ + fman_mac_exception_cb *exception_cb; + fman_mac_exception_cb *event_cb; + /* pointer to driver's global address hash table */ + struct eth_hash_t *multicast_addr_hash; + /* pointer to driver's individual address hash table */ + struct eth_hash_t *unicast_addr_hash; + u8 mac_id; + u32 exceptions; + struct tgec_cfg *cfg; + void *fm; + struct fman_rev_info fm_rev_info; +}; + +static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr) +{ + u32 tmp0, tmp1; + + tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24); + tmp1 = (u32)(adr[4] | adr[5] << 8); + iowrite32be(tmp0, ®s->mac_addr_0); + iowrite32be(tmp1, ®s->mac_addr_1); +} + +static void set_dflts(struct tgec_cfg *cfg) +{ + cfg->promiscuous_mode_enable = false; + cfg->pause_ignore = false; + cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH; + cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH; + cfg->pause_quant = DEFAULT_PAUSE_QUANT; +} + +static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg, + u32 exception_mask) +{ + u32 tmp; + + /* Config */ + tmp = CMF_CFG_CRC_FWD; + if (cfg->promiscuous_mode_enable) + tmp |= CMD_CFG_PROMIS_EN; + if (cfg->pause_ignore) + tmp |= CMD_CFG_PAUSE_IGNORE; + /* Payload length check disable */ + tmp |= CMD_CFG_NO_LEN_CHK; + iowrite32be(tmp, ®s->command_config); + + /* Max Frame Length */ + iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm); + /* Pause Time */ + iowrite32be(cfg->pause_quant, ®s->pause_quant); + + /* clear all pending events and set-up interrupts */ + iowrite32be(0xffffffff, ®s->ievent); + iowrite32be(ioread32be(®s->imask) | exception_mask, ®s->imask); + + return 0; +} + +static int check_init_parameters(struct fman_mac *tgec) +{ + if (tgec->max_speed < SPEED_10000) { + pr_err("10G MAC driver only support 10G speed\n"); + return -EINVAL; + } + if (tgec->addr == 0) { + pr_err("Ethernet 10G MAC Must have valid MAC Address\n"); + return -EINVAL; + } + if (!tgec->exception_cb) { + pr_err("uninitialized exception_cb\n"); + return -EINVAL; + } + if (!tgec->event_cb) { + pr_err("uninitialized event_cb\n"); + return -EINVAL; + } + + return 0; +} + +static int get_exception_flag(enum fman_mac_exceptions exception) +{ + u32 bit_mask; + + switch (exception) { + case FM_MAC_EX_10G_MDIO_SCAN_EVENT: + bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT; + break; + case FM_MAC_EX_10G_MDIO_CMD_CMPL: + bit_mask = TGEC_IMASK_MDIO_CMD_CMPL; + break; + case FM_MAC_EX_10G_REM_FAULT: + bit_mask = TGEC_IMASK_REM_FAULT; + break; + case FM_MAC_EX_10G_LOC_FAULT: + bit_mask = TGEC_IMASK_LOC_FAULT; + break; + case FM_MAC_EX_10G_TX_ECC_ER: + bit_mask = TGEC_IMASK_TX_ECC_ER; + break; + case FM_MAC_EX_10G_TX_FIFO_UNFL: + bit_mask = TGEC_IMASK_TX_FIFO_UNFL; + break; + case FM_MAC_EX_10G_TX_FIFO_OVFL: + bit_mask = TGEC_IMASK_TX_FIFO_OVFL; + break; + case FM_MAC_EX_10G_TX_ER: + bit_mask = TGEC_IMASK_TX_ER; + break; + case FM_MAC_EX_10G_RX_FIFO_OVFL: + bit_mask = TGEC_IMASK_RX_FIFO_OVFL; + break; + case FM_MAC_EX_10G_RX_ECC_ER: + bit_mask = TGEC_IMASK_RX_ECC_ER; + break; + case FM_MAC_EX_10G_RX_JAB_FRM: + bit_mask = TGEC_IMASK_RX_JAB_FRM; + break; + case FM_MAC_EX_10G_RX_OVRSZ_FRM: + bit_mask = TGEC_IMASK_RX_OVRSZ_FRM; + break; + case FM_MAC_EX_10G_RX_RUNT_FRM: + bit_mask = TGEC_IMASK_RX_RUNT_FRM; + break; + case FM_MAC_EX_10G_RX_FRAG_FRM: + bit_mask = TGEC_IMASK_RX_FRAG_FRM; + break; + case FM_MAC_EX_10G_RX_LEN_ER: + bit_mask = TGEC_IMASK_RX_LEN_ER; + 
break; + case FM_MAC_EX_10G_RX_CRC_ER: + bit_mask = TGEC_IMASK_RX_CRC_ER; + break; + case FM_MAC_EX_10G_RX_ALIGN_ER: + bit_mask = TGEC_IMASK_RX_ALIGN_ER; + break; + default: + bit_mask = 0; + break; + } + + return bit_mask; +} + +static void tgec_err_exception(void *handle) +{ + struct fman_mac *tgec = (struct fman_mac *)handle; + struct tgec_regs __iomem *regs = tgec->regs; + u32 event; + + /* do not handle MDIO events */ + event = ioread32be(®s->ievent) & + ~(TGEC_IMASK_MDIO_SCAN_EVENT | + TGEC_IMASK_MDIO_CMD_CMPL); + + event &= ioread32be(®s->imask); + + iowrite32be(event, ®s->ievent); + + if (event & TGEC_IMASK_REM_FAULT) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT); + if (event & TGEC_IMASK_LOC_FAULT) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT); + if (event & TGEC_IMASK_TX_ECC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER); + if (event & TGEC_IMASK_TX_FIFO_UNFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL); + if (event & TGEC_IMASK_TX_FIFO_OVFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL); + if (event & TGEC_IMASK_TX_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER); + if (event & TGEC_IMASK_RX_FIFO_OVFL) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL); + if (event & TGEC_IMASK_RX_ECC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER); + if (event & TGEC_IMASK_RX_JAB_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM); + if (event & TGEC_IMASK_RX_OVRSZ_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM); + if (event & TGEC_IMASK_RX_RUNT_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM); + if (event & TGEC_IMASK_RX_FRAG_FRM) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM); + if (event & TGEC_IMASK_RX_LEN_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER); + if (event & TGEC_IMASK_RX_CRC_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER); + if (event & TGEC_IMASK_RX_ALIGN_ER) + tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER); +} + +static void free_init_resources(struct fman_mac *tgec) +{ + fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id, + FMAN_INTR_TYPE_ERR); + + /* release the driver's group hash table */ + free_hash_table(tgec->multicast_addr_hash); + tgec->multicast_addr_hash = NULL; + + /* release the driver's individual hash table */ + free_hash_table(tgec->unicast_addr_hash); + tgec->unicast_addr_hash = NULL; +} + +static bool is_init_done(struct tgec_cfg *cfg) +{ + /* Checks if tGEC driver parameters were initialized */ + if (!cfg) + return true; + + return false; +} + +int tgec_enable(struct fman_mac *tgec, enum comm_mode mode) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp |= CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp |= CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_disable(struct fman_mac *tgec, enum comm_mode mode) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (mode & COMM_MODE_RX) + tmp &= ~CMD_CFG_RX_EN; + if (mode & COMM_MODE_TX) + tmp &= ~CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if 
(!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (new_val) + tmp |= CMD_CFG_PROMIS_EN; + else + tmp &= ~CMD_CFG_PROMIS_EN; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val) +{ + if (is_init_done(tgec->cfg)) + return -EINVAL; + + tgec->cfg->max_frame_length = new_val; + + return 0; +} + +int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority, + u16 pause_time, u16 __maybe_unused thresh_time) +{ + struct tgec_regs __iomem *regs = tgec->regs; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + iowrite32be((u32)pause_time, ®s->pause_quant); + + return 0; +} + +int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + if (!en) + tmp |= CMD_CFG_PAUSE_IGNORE; + else + tmp &= ~CMD_CFG_PAUSE_IGNORE; + iowrite32be(tmp, ®s->command_config); + + return 0; +} + +int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr) +{ + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr); + set_mac_address(tgec->regs, (u8 *)(*p_enet_addr)); + + return 0; +} + +int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) +{ + struct tgec_regs __iomem *regs = tgec->regs; + struct eth_hash_entry *hash_entry; + u32 crc = 0xFFFFFFFF, hash; + u64 addr; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + addr = ENET_ADDR_TO_UINT64(*eth_addr); + + if (!(addr & GROUP_ADDRESS)) { + /* Unicast addresses not supported in hash */ + pr_err("Unicast Address\n"); + return -EINVAL; + } + /* CRC calculation */ + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + /* Take 9 MSB bits */ + hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; + + /* Create element to be added to the driver hash table */ + hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; + INIT_LIST_HEAD(&hash_entry->node); + + list_add_tail(&hash_entry->node, + &tgec->multicast_addr_hash->lsts[hash]); + iowrite32be((hash | TGEC_HASH_MCAST_EN), ®s->hashtable_ctrl); + + return 0; +} + +int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) +{ + struct tgec_regs __iomem *regs = tgec->regs; + struct eth_hash_entry *hash_entry = NULL; + struct list_head *pos; + u32 crc = 0xFFFFFFFF, hash; + u64 addr; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + addr = ((*(u64 *)eth_addr) >> 16); + + /* CRC calculation */ + crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN); + crc = bitrev32(crc); + /* Take 9 MSB bits */ + hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; + + list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); + if (hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; + } + } + if (list_empty(&tgec->multicast_addr_hash->lsts[hash])) + iowrite32be((hash & ~TGEC_HASH_MCAST_EN), + ®s->hashtable_ctrl); + + return 0; +} + +int tgec_get_version(struct fman_mac *tgec, u32 *mac_version) +{ + struct tgec_regs __iomem *regs = tgec->regs; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + *mac_version = ioread32be(®s->tgec_id); + + return 0; +} + +int tgec_set_exception(struct fman_mac *tgec, + enum fman_mac_exceptions exception, bool enable) +{ + struct tgec_regs __iomem *regs = 
tgec->regs; + u32 bit_mask = 0; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + bit_mask = get_exception_flag(exception); + if (bit_mask) { + if (enable) + tgec->exceptions |= bit_mask; + else + tgec->exceptions &= ~bit_mask; + } else { + pr_err("Undefined exception\n"); + return -EINVAL; + } + if (enable) + iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask); + else + iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask); + + return 0; +} + +int tgec_init(struct fman_mac *tgec) +{ + struct tgec_cfg *cfg; + enet_addr_t eth_addr; + int err; + + if (is_init_done(tgec->cfg)) + return -EINVAL; + + if (DEFAULT_RESET_ON_INIT && + (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) { + pr_err("Can't reset MAC!\n"); + return -EINVAL; + } + + err = check_init_parameters(tgec); + if (err) + return err; + + cfg = tgec->cfg; + + MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr); + set_mac_address(tgec->regs, (u8 *)eth_addr); + + /* interrupts */ + /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */ + if (tgec->fm_rev_info.major <= 2) + tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT | + TGEC_IMASK_LOC_FAULT); + + err = init(tgec->regs, cfg, tgec->exceptions); + if (err) { + free_init_resources(tgec); + pr_err("TGEC version doesn't support this i/f mode\n"); + return err; + } + + /* Max Frame Length */ + err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id, + cfg->max_frame_length); + if (err) { + pr_err("Setting max frame length failed\n"); + free_init_resources(tgec); + return -EINVAL; + } + + /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */ + if (tgec->fm_rev_info.major == 2) { + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + /* restore the default Tx IPG length */ + tmp = (ioread32be(&regs->tx_ipg_len) & + ~TGEC_TX_IPG_LENGTH_MASK) | 12; + + iowrite32be(tmp, &regs->tx_ipg_len); + } + + tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE); + if (!tgec->multicast_addr_hash) { + free_init_resources(tgec); + pr_err("multicast hash table allocation failed\n"); + return -ENOMEM; + } + + tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE); + if (!tgec->unicast_addr_hash) { + free_init_resources(tgec); + pr_err("unicast hash table allocation failed\n"); + return -ENOMEM; + } + + fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id, + FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec); + + kfree(cfg); + tgec->cfg = NULL; + + return 0; +} + +int tgec_free(struct fman_mac *tgec) +{ + free_init_resources(tgec); + + kfree(tgec->cfg); + tgec->cfg = NULL; + kfree(tgec); + + return 0; +} + +struct fman_mac *tgec_config(struct fman_mac_params *params) +{ + struct fman_mac *tgec; + struct tgec_cfg *cfg; + void __iomem *base_addr; + + base_addr = params->base_addr; + /* allocate memory for the 10G MAC (TGEC) data structure */ + tgec = kzalloc(sizeof(*tgec), GFP_KERNEL); + if (!tgec) + return NULL; + + /* allocate memory for the 10G MAC driver parameters data structure.
*/ + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + tgec_free(tgec); + return NULL; + } + + /* Plant parameter structure pointer */ + tgec->cfg = cfg; + + set_dflts(cfg); + + tgec->regs = base_addr; + tgec->addr = ENET_ADDR_TO_UINT64(params->addr); + tgec->max_speed = params->max_speed; + tgec->mac_id = params->mac_id; + tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT | + TGEC_IMASK_REM_FAULT | + TGEC_IMASK_LOC_FAULT | + TGEC_IMASK_TX_ECC_ER | + TGEC_IMASK_TX_FIFO_UNFL | + TGEC_IMASK_TX_FIFO_OVFL | + TGEC_IMASK_TX_ER | + TGEC_IMASK_RX_FIFO_OVFL | + TGEC_IMASK_RX_ECC_ER | + TGEC_IMASK_RX_JAB_FRM | + TGEC_IMASK_RX_OVRSZ_FRM | + TGEC_IMASK_RX_RUNT_FRM | + TGEC_IMASK_RX_FRAG_FRM | + TGEC_IMASK_RX_CRC_ER | + TGEC_IMASK_RX_ALIGN_ER); + tgec->exception_cb = params->exception_cb; + tgec->event_cb = params->event_cb; + tgec->dev_id = params->dev_id; + tgec->fm = params->fm; + + /* Save FMan revision */ + fman_get_revision(tgec->fm, &tgec->fm_rev_info); + + return tgec; +} diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h new file mode 100644 index 000000000000..514bba9f47ce --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -0,0 +1,55 @@ +/* + * Copyright 2008-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __TGEC_H +#define __TGEC_H + +#include "fman_mac.h" + +struct fman_mac *tgec_config(struct fman_mac_params *params); +int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val); +int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr); +int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val); +int tgec_enable(struct fman_mac *tgec, enum comm_mode mode); +int tgec_disable(struct fman_mac *tgec, enum comm_mode mode); +int tgec_init(struct fman_mac *tgec); +int tgec_free(struct fman_mac *tgec); +int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en); +int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority, + u16 pause_time, u16 thresh_time); +int tgec_set_exception(struct fman_mac *tgec, + enum fman_mac_exceptions exception, bool enable); +int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); +int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); +int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); + +#endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c new file mode 100644 index 000000000000..e33d9d24c1db --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -0,0 +1,977 @@ +/* Copyright 2008-2015 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/device.h> +#include <linux/phy.h> +#include <linux/netdevice.h> +#include <linux/phy_fixed.h> +#include <linux/etherdevice.h> +#include <linux/libfdt_env.h> + +#include "mac.h" +#include "fman_mac.h" +#include "fman_dtsec.h" +#include "fman_tgec.h" +#include "fman_memac.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL FMan MAC API based driver"); + +struct mac_priv_s { + struct device *dev; + void __iomem *vaddr; + u8 cell_index; + phy_interface_t phy_if; + struct fman *fman; + struct device_node *phy_node; + struct device_node *internal_phy_node; + /* List of multicast addresses */ + struct list_head mc_addr_list; + struct platform_device *eth_dev; + struct fixed_phy_status *fixed_link; + u16 speed; + u16 max_speed; + + int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode); + int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode); +}; + +struct mac_address { + u8 addr[ETH_ALEN]; + struct list_head list; +}; + +static void mac_exception(void *handle, enum fman_mac_exceptions ex) +{ + struct mac_device *mac_dev; + struct mac_priv_s *priv; + + mac_dev = handle; + priv = mac_dev->priv; + + if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) { + /* don't flag RX FIFO after the first */ + mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_10G_RX_FIFO_OVFL, false); + dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex); + } + + dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c", + __func__, ex); +} + +static void set_fman_mac_params(struct mac_device *mac_dev, + struct fman_mac_params *params) +{ + struct mac_priv_s *priv = mac_dev->priv; + + params->base_addr = (typeof(params->base_addr)) + devm_ioremap(priv->dev, mac_dev->res->start, + resource_size(mac_dev->res)); + memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr)); + params->max_speed = priv->max_speed; + params->phy_if = priv->phy_if; + params->basex_if = false; + params->mac_id = priv->cell_index; + params->fm = (void *)priv->fman; + params->exception_cb = mac_exception; + params->event_cb = mac_exception; + params->dev_id = mac_dev; + params->internal_phy_node = priv->internal_phy_node; +} + +static int tgec_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + u32 version; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, &params); + + mac_dev->fman_mac = tgec_config(&params); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = tgec_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + /* For 10G MAC, disable Tx ECC exception */ + err = mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_10G_TX_ECC_ER, false); + if (err < 0) + goto _return_fm_mac_free; + + err = tgec_get_version(mac_dev->fman_mac, &version); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version); + + goto _return; + +_return_fm_mac_free: + tgec_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int dtsec_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + u32 version; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, &params); + +
mac_dev->fman_mac = dtsec_config(&params); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + /* For 1G MAC, disable by default the MIB counters overflow interrupt */ + err = mac_dev->set_exception(mac_dev->fman_mac, + FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false); + if (err < 0) + goto _return_fm_mac_free; + + err = dtsec_get_version(mac_dev->fman_mac, &version); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version); + + goto _return; + +_return_fm_mac_free: + dtsec_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int memac_initialization(struct mac_device *mac_dev) +{ + int err; + struct mac_priv_s *priv; + struct fman_mac_params params; + + priv = mac_dev->priv; + + set_fman_mac_params(mac_dev, &params); + + if (priv->max_speed == SPEED_10000) + params.phy_if = PHY_INTERFACE_MODE_XGMII; + + mac_dev->fman_mac = memac_config(&params); + if (!mac_dev->fman_mac) { + err = -EINVAL; + goto _return; + } + + err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm()); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_cfg_reset_on_init(mac_dev->fman_mac, true); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link); + if (err < 0) + goto _return_fm_mac_free; + + err = memac_init(mac_dev->fman_mac); + if (err < 0) + goto _return_fm_mac_free; + + dev_info(priv->dev, "FMan MEMAC\n"); + + goto _return; + +_return_fm_mac_free: + memac_free(mac_dev->fman_mac); + +_return: + return err; +} + +static int start(struct mac_device *mac_dev) +{ + int err; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct mac_priv_s *priv = mac_dev->priv; + + err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX); + if (!err && phy_dev) + phy_start(phy_dev); + + return err; +} + +static int stop(struct mac_device *mac_dev) +{ + struct mac_priv_s *priv = mac_dev->priv; + + if (mac_dev->phy_dev) + phy_stop(mac_dev->phy_dev); + + return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX); +} + +static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev) +{ + struct mac_priv_s *priv; + struct mac_address *old_addr, *tmp; + struct netdev_hw_addr *ha; + int err; + enet_addr_t *addr; + + priv = mac_dev->priv; + + /* Clear previous address list */ + list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) { + addr = (enet_addr_t *)old_addr->addr; + err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr); + if (err < 0) + return err; + + list_del(&old_addr->list); + kfree(old_addr); + } + + /* Add all the addresses from the new list */ + netdev_for_each_mc_addr(ha, net_dev) { + addr = (enet_addr_t *)ha->addr; + err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr); + if (err < 0) + return err; + + tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + ether_addr_copy(tmp->addr, ha->addr); + list_add(&tmp->list, &priv->mc_addr_list); + } + return 0; +} + +/** + * fman_set_mac_active_pause + * @mac_dev: A pointer to the MAC device + * @rx: Pause frame setting for RX + * @tx: Pause frame setting for TX + * + * Set the MAC RX/TX PAUSE frames settings + * + * Avoid redundant calls to FMD, if the MAC driver already contains the desired +
* active PAUSE settings should be reflected + * in FMan. + * + * Return: 0 on success; Error code otherwise. + */ +int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx) +{ + struct fman_mac *fman_mac = mac_dev->fman_mac; + int err = 0; + + if (rx != mac_dev->rx_pause_active) { + err = mac_dev->set_rx_pause(fman_mac, rx); + if (likely(err == 0)) + mac_dev->rx_pause_active = rx; + } + + if (tx != mac_dev->tx_pause_active) { + u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE); + + err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0); + + if (likely(err == 0)) + mac_dev->tx_pause_active = tx; + } + + return err; +} +EXPORT_SYMBOL(fman_set_mac_active_pause); + +/** + * fman_get_pause_cfg + * @mac_dev: A pointer to the MAC device + * @rx_pause: Return value for the RX PAUSE setting + * @tx_pause: Return value for the TX PAUSE setting + * + * Determine the MAC RX/TX PAUSE frames settings based on PHY + * autonegotiation or values set by ethtool. + * + * Return: none; the resolved settings are returned in @rx_pause and @tx_pause. + */ +void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, + bool *tx_pause) +{ + struct phy_device *phy_dev = mac_dev->phy_dev; + u16 lcl_adv, rmt_adv; + u8 flowctrl; + + *rx_pause = *tx_pause = false; + + if (!phy_dev->duplex) + return; + + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings + * are those set by ethtool. + */ + if (!mac_dev->autoneg_pause) { + *rx_pause = mac_dev->rx_pause_req; + *tx_pause = mac_dev->tx_pause_req; + return; + } + + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE + * settings depend on the result of the link negotiation. + */ + + /* get local capabilities */ + lcl_adv = 0; + if (phy_dev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phy_dev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + /* get link partner capabilities */ + rmt_adv = 0; + if (phy_dev->pause) + rmt_adv |= LPA_PAUSE_CAP; + if (phy_dev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + /* Calculate TX/RX settings based on local and peer advertised + * symmetric/asymmetric PAUSE capabilities.
+ */ + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_RX) + *rx_pause = true; + if (flowctrl & FLOW_CTRL_TX) + *tx_pause = true; +} +EXPORT_SYMBOL(fman_get_pause_cfg); + +static void adjust_link_void(struct net_device *net_dev) +{ +} + +static void adjust_link_dtsec(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa_eth_data *eth_data = dev->platform_data; + struct mac_device *mac_dev = eth_data->mac_dev; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct fman_mac *fman_mac; + bool rx_pause, tx_pause; + int err; + + fman_mac = mac_dev->fman_mac; + if (!phy_dev->link) { + dtsec_restart_autoneg(fman_mac); + + return; + } + + dtsec_adjust_link(fman_mac, phy_dev->speed); + fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); + err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); + if (err < 0) + netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); +} + +static void adjust_link_memac(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa_eth_data *eth_data = dev->platform_data; + struct mac_device *mac_dev = eth_data->mac_dev; + struct phy_device *phy_dev = mac_dev->phy_dev; + struct fman_mac *fman_mac; + bool rx_pause, tx_pause; + int err; + + fman_mac = mac_dev->fman_mac; + memac_adjust_link(fman_mac, phy_dev->speed); + + fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); + err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); + if (err < 0) + netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); +} + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. + */ +static int init_phy(struct net_device *net_dev, + struct mac_device *mac_dev, + void (*adj_lnk)(struct net_device *)) +{ + struct phy_device *phy_dev; + struct mac_priv_s *priv = mac_dev->priv; + + phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0, + priv->phy_if); + if (!phy_dev) { + netdev_err(net_dev, "Could not connect to PHY\n"); + return -ENODEV; + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= mac_dev->if_support; + /* Enable the symmetric and asymmetric PAUSE frame advertisements, + * as most of the PHY drivers do not enable them by default. 
+ */ + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phy_dev->advertising = phy_dev->supported; + + mac_dev->phy_dev = phy_dev; + + return 0; +} + +static int dtsec_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, &adjust_link_dtsec); +} + +static int tgec_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, adjust_link_void); +} + +static int memac_init_phy(struct net_device *net_dev, + struct mac_device *mac_dev) +{ + return init_phy(net_dev, mac_dev, &adjust_link_memac); +} + +static void setup_dtsec(struct mac_device *mac_dev) +{ + mac_dev->init_phy = dtsec_init_phy; + mac_dev->init = dtsec_initialization; + mac_dev->set_promisc = dtsec_set_promiscuous; + mac_dev->change_addr = dtsec_modify_mac_address; + mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address; + mac_dev->set_tx_pause = dtsec_set_tx_pause_frames; + mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; + mac_dev->set_exception = dtsec_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = dtsec_enable; + mac_dev->priv->disable = dtsec_disable; +} + +static void setup_tgec(struct mac_device *mac_dev) +{ + mac_dev->init_phy = tgec_init_phy; + mac_dev->init = tgec_initialization; + mac_dev->set_promisc = tgec_set_promiscuous; + mac_dev->change_addr = tgec_modify_mac_address; + mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address; + mac_dev->set_tx_pause = tgec_set_tx_pause_frames; + mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; + mac_dev->set_exception = tgec_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = tgec_enable; + mac_dev->priv->disable = tgec_disable; +} + +static void setup_memac(struct mac_device *mac_dev) +{ + mac_dev->init_phy = memac_init_phy; + mac_dev->init = memac_initialization; + mac_dev->set_promisc = memac_set_promiscuous; + mac_dev->change_addr = memac_modify_mac_address; + mac_dev->add_hash_mac_addr = memac_add_hash_mac_address; + mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address; + mac_dev->set_tx_pause = memac_set_tx_pause_frames; + mac_dev->set_rx_pause = memac_accept_rx_pause_frames; + mac_dev->set_exception = memac_set_exception; + mac_dev->set_multi = set_multi; + mac_dev->start = start; + mac_dev->stop = stop; + + mac_dev->priv->enable = memac_enable; + mac_dev->priv->disable = memac_disable; +} + +#define DTSEC_SUPPORTED \ + (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_Autoneg \ + | SUPPORTED_Pause \ + | SUPPORTED_Asym_Pause \ + | SUPPORTED_MII) + +static DEFINE_MUTEX(eth_lock); + +static const char phy_str[][11] = { + [PHY_INTERFACE_MODE_MII] = "mii", + [PHY_INTERFACE_MODE_GMII] = "gmii", + [PHY_INTERFACE_MODE_SGMII] = "sgmii", + [PHY_INTERFACE_MODE_TBI] = "tbi", + [PHY_INTERFACE_MODE_RMII] = "rmii", + [PHY_INTERFACE_MODE_RGMII] = "rgmii", + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", + [PHY_INTERFACE_MODE_RTBI] = "rtbi", + [PHY_INTERFACE_MODE_XGMII] = "xgmii" +}; + +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phy_str); 
i++) + if (strcmp(str, phy_str[i]) == 0) + return (phy_interface_t)i; + + return PHY_INTERFACE_MODE_MII; +} + +static const u16 phy2speed[] = { + [PHY_INTERFACE_MODE_MII] = SPEED_100, + [PHY_INTERFACE_MODE_GMII] = SPEED_1000, + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_TBI] = SPEED_1000, + [PHY_INTERFACE_MODE_RMII] = SPEED_100, + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000 +}; + +static struct platform_device *dpaa_eth_add_device(int fman_id, + struct mac_device *mac_dev, + struct device_node *node) +{ + struct platform_device *pdev; + struct dpaa_eth_data data; + struct mac_priv_s *priv; + static int dpaa_eth_dev_cnt; + int ret; + + priv = mac_dev->priv; + + data.mac_dev = mac_dev; + data.mac_hw_id = priv->cell_index; + data.fman_hw_id = fman_id; + data.mac_node = node; + + mutex_lock(&eth_lock); + + pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt); + if (!pdev) { + ret = -ENOMEM; + goto no_mem; + } + + ret = platform_device_add_data(pdev, &data, sizeof(data)); + if (ret) + goto err; + + ret = platform_device_add(pdev); + if (ret) + goto err; + + dpaa_eth_dev_cnt++; + mutex_unlock(&eth_lock); + + return pdev; + +err: + platform_device_put(pdev); +no_mem: + mutex_unlock(&eth_lock); + + return ERR_PTR(ret); +} + +static const struct of_device_id mac_match[] = { + { .compatible = "fsl,fman-dtsec" }, + { .compatible = "fsl,fman-xgec" }, + { .compatible = "fsl,fman-memac" }, + {} +}; +MODULE_DEVICE_TABLE(of, mac_match); + +static int mac_probe(struct platform_device *_of_dev) +{ + int err, i, lenp, nph; + struct device *dev; + struct device_node *mac_node, *dev_node; + struct mac_device *mac_dev; + struct platform_device *of_dev; + struct resource res; + struct mac_priv_s *priv; + const u8 *mac_addr; + const char *char_prop; + const u32 *u32_prop; + u8 fman_id; + + dev = &_of_dev->dev; + mac_node = dev->of_node; + + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL); + if (!mac_dev) { + err = -ENOMEM; + dev_err(dev, "devm_kzalloc() = %d\n", err); + goto _return; + } + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto _return; + } + + /* Save private information */ + mac_dev->priv = priv; + priv->dev = dev; + + if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) { + setup_dtsec(mac_dev); + priv->internal_phy_node = of_parse_phandle(mac_node, + "tbi-handle", 0); + } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) { + setup_tgec(mac_dev); + } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) { + setup_memac(mac_dev); + priv->internal_phy_node = of_parse_phandle(mac_node, + "pcsphy-handle", 0); + } else { + dev_err(dev, "MAC node (%s) contains unsupported MAC\n", + mac_node->full_name); + err = -EINVAL; + goto _return; + } + + /* Register mac_dev */ + dev_set_drvdata(dev, mac_dev); + + INIT_LIST_HEAD(&priv->mc_addr_list); + + /* Get the FM node */ + dev_node = of_get_parent(mac_node); + if (!dev_node) { + dev_err(dev, "of_get_parent(%s) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + + of_dev = of_find_device_by_node(dev_node); + if (!of_dev) { + dev_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + /* Get the FMan cell-index */ + u32_prop =
of_get_property(dev_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(dev, "of_get_property(%s, cell-index) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + WARN_ON(lenp != sizeof(u32)); + /* cell-index 0 => FMan id 1 */ + fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1); + + priv->fman = fman_bind(&of_dev->dev); + if (!priv->fman) { + dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name); + err = -ENODEV; + goto _return_of_node_put; + } + + of_node_put(dev_node); + + /* Get the address of the memory mapped registers */ + err = of_address_to_resource(mac_node, 0, &res); + if (err < 0) { + dev_err(dev, "of_address_to_resource(%s) = %d\n", + mac_node->full_name, err); + goto _return_dev_set_drvdata; + } + + mac_dev->res = __devm_request_region(dev, + fman_get_mem_region(priv->fman), + res.start, res.end + 1 - res.start, + "mac"); + if (!mac_dev->res) { + dev_err(dev, "__devm_request_mem_region(mac) failed\n"); + err = -EBUSY; + goto _return_dev_set_drvdata; + } + + priv->vaddr = devm_ioremap(dev, mac_dev->res->start, + mac_dev->res->end + 1 - mac_dev->res->start); + if (!priv->vaddr) { + dev_err(dev, "devm_ioremap() failed\n"); + err = -EIO; + goto _return_dev_set_drvdata; + } + + if (!of_device_is_available(mac_node)) { + devm_iounmap(dev, priv->vaddr); + __devm_release_region(dev, fman_get_mem_region(priv->fman), + res.start, res.end + 1 - res.start); + devm_kfree(dev, mac_dev); + dev_set_drvdata(dev, NULL); + return -ENODEV; + } + + /* Get the cell-index */ + u32_prop = of_get_property(mac_node, "cell-index", &lenp); + if (!u32_prop) { + dev_err(dev, "of_get_property(%s, cell-index) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + WARN_ON(lenp != sizeof(u32)); + priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]); + + /* Get the MAC address */ + mac_addr = of_get_mac_address(mac_node); + if (!mac_addr) { + dev_err(dev, "of_get_mac_address(%s) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); + + /* Get the port handles */ + nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); + if (unlikely(nph < 0)) { + dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n", + mac_node->full_name); + err = nph; + goto _return_dev_set_drvdata; + } + + if (nph != ARRAY_SIZE(mac_dev->port)) { + dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n", + mac_node->full_name); + err = -EINVAL; + goto _return_dev_set_drvdata; + } + + for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + /* Find the port node */ + dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); + if (!dev_node) { + dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n", + mac_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + of_dev = of_find_device_by_node(dev_node); + if (!of_dev) { + dev_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + + mac_dev->port[i] = fman_port_bind(&of_dev->dev); + if (!mac_dev->port[i]) { + dev_err(dev, "dev_get_drvdata(%s) failed\n", + dev_node->full_name); + err = -EINVAL; + goto _return_of_node_put; + } + of_node_put(dev_node); + } + + /* Get the PHY connection type */ + char_prop = (const char *)of_get_property(mac_node, + "phy-connection-type", NULL); + if (!char_prop) { + dev_warn(dev, + "of_get_property(%s, phy-connection-type) failed. 
Defaulting to MII\n", + mac_node->full_name); + priv->phy_if = PHY_INTERFACE_MODE_MII; + } else { + priv->phy_if = str2phy(char_prop); + } + + priv->speed = phy2speed[priv->phy_if]; + priv->max_speed = priv->speed; + mac_dev->if_support = DTSEC_SUPPORTED; + /* We don't support half-duplex in SGMII mode */ + if (priv->phy_if == PHY_INTERFACE_MODE_SGMII) + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_100baseT_Half); + + /* Gigabit support (no half-duplex) */ + if (priv->max_speed == 1000) + mac_dev->if_support |= SUPPORTED_1000baseT_Full; + + /* The 10G interface only supports one mode */ + if (priv->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->if_support = SUPPORTED_10000baseT_Full; + + /* Get the rest of the PHY information */ + priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); + if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) { + struct phy_device *phy; + + err = of_phy_register_fixed_link(mac_node); + if (err) + goto _return_dev_set_drvdata; + + priv->fixed_link = kzalloc(sizeof(*priv->fixed_link), + GFP_KERNEL); + if (!priv->fixed_link) + goto _return_dev_set_drvdata; + + priv->phy_node = of_node_get(mac_node); + phy = of_phy_find_device(priv->phy_node); + if (!phy) + goto _return_dev_set_drvdata; + + priv->fixed_link->link = phy->link; + priv->fixed_link->speed = phy->speed; + priv->fixed_link->duplex = phy->duplex; + priv->fixed_link->pause = phy->pause; + priv->fixed_link->asym_pause = phy->asym_pause; + } + + err = mac_dev->init(mac_dev); + if (err < 0) { + dev_err(dev, "mac_dev->init() = %d\n", err); + of_node_put(priv->phy_node); + goto _return_dev_set_drvdata; + } + + /* pause frame autonegotiation enabled */ + mac_dev->autoneg_pause = true; + + /* By intializing the values to false, force FMD to enable PAUSE frames + * on RX and TX + */ + mac_dev->rx_pause_req = true; + mac_dev->tx_pause_req = true; + mac_dev->rx_pause_active = false; + mac_dev->tx_pause_active = false; + err = fman_set_mac_active_pause(mac_dev, true, true); + if (err < 0) + dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); + + dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); + + priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node); + if (IS_ERR(priv->eth_dev)) { + dev_err(dev, "failed to add Ethernet platform device for MAC %d\n", + priv->cell_index); + priv->eth_dev = NULL; + } + + goto _return; + +_return_of_node_put: + of_node_put(dev_node); +_return_dev_set_drvdata: + kfree(priv->fixed_link); + dev_set_drvdata(dev, NULL); +_return: + return err; +} + +static struct platform_driver mac_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mac_match, + }, + .probe = mac_probe, +}; + +builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h new file mode 100644 index 000000000000..0211cc9a46d6 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -0,0 +1,97 @@ +/* Copyright 2008-2015 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MAC_H +#define __MAC_H + +#include <linux/device.h> +#include <linux/if_ether.h> +#include <linux/phy.h> +#include <linux/list.h> + +#include "fman_port.h" +#include "fman.h" +#include "fman_mac.h" + +struct fman_mac; +struct mac_priv_s; + +struct mac_device { + struct resource *res; + u8 addr[ETH_ALEN]; + struct fman_port *port[2]; + u32 if_support; + struct phy_device *phy_dev; + + bool autoneg_pause; + bool rx_pause_req; + bool tx_pause_req; + bool rx_pause_active; + bool tx_pause_active; + bool promisc; + + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev); + int (*init)(struct mac_device *mac_dev); + int (*start)(struct mac_device *mac_dev); + int (*stop)(struct mac_device *mac_dev); + int (*set_promisc)(struct fman_mac *mac_dev, bool enable); + int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); + int (*set_multi)(struct net_device *net_dev, + struct mac_device *mac_dev); + int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); + int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority, + u16 pause_time, u16 thresh_time); + int (*set_exception)(struct fman_mac *mac_dev, + enum fman_mac_exceptions exception, bool enable); + int (*add_hash_mac_addr)(struct fman_mac *mac_dev, + enet_addr_t *eth_addr); + int (*remove_hash_mac_addr)(struct fman_mac *mac_dev, + enet_addr_t *eth_addr); + + struct fman_mac *fman_mac; + struct mac_priv_s *priv; +}; + +struct dpaa_eth_data { + struct device_node *mac_node; + struct mac_device *mac_dev; + int mac_hw_id; + int fman_hw_id; +}; + +extern const char *mac_driver_description; + +int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx); + +void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, + bool *tx_pause); + +#endif /* __MAC_H */ diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index cf8e54652df9..48a9c176e0d1 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1050,7 +1050,7 @@ static int 
fs_enet_probe(struct platform_device *ofdev) ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); - netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); + netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); ndev->ethtool_ops = &fs_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 016743e355de..bade2f8f9b5c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -254,7 +254,7 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; - struct mii_bus* mii = fep->phydev->bus; + struct mii_bus *mii = fep->phydev->mdio.bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); @@ -363,7 +363,7 @@ static void stop(struct net_device *dev) const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - struct fec_info* feci= fep->phydev->bus->priv; + struct fec_info *feci = fep->phydev->mdio.bus->priv; int i; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 68a428de0bc0..1f015edcca22 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -172,23 +172,16 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) goto out_free_bus; new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; -out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); out_free_bus: @@ -205,7 +198,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 2be383e6d258..a89267b94352 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -166,23 +166,16 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); new_bus->phy_mask = ~0; - new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) { - ret = -ENOMEM; - goto out_unmap_regs; - } new_bus->parent = &ofdev->dev; platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) - goto out_free_irqs; + goto out_unmap_regs; return 0; -out_free_irqs: - kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); out_res: @@ -200,7 +193,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - kfree(bus->irq); iounmap(fec->fecp); kfree(fec); mdiobus_free(bus); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 40071dad1c57..622005abf859 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -69,7 +69,6 @@ struct fsl_pq_mdio { struct fsl_pq_mdio_priv { void __iomem *map; struct 
fsl_pq_mii __iomem *regs; - int irqs[PHY_MAX_ADDR]; }; /* @@ -401,7 +400,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) new_bus->read = &fsl_pq_mdio_read; new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; - new_bus->irq = priv->irqs; err = of_address_to_resource(np, 0, &res); if (err < 0) { diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3e233d924cce..2aa7b401cc3b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -738,7 +738,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) struct gfar_private *priv = NULL; struct device_node *np = ofdev->dev.of_node; struct device_node *child = NULL; - struct property *stash; u32 stash_len = 0; u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; @@ -854,9 +853,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) goto err_grp_init; } - stash = of_find_property(np, "bd-stash", NULL); - - if (stash) { + if (of_property_read_bool(np, "bd-stash")) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } @@ -1348,12 +1345,12 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->poll_mode == GFAR_SQ_POLLING) { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, gfar_poll_rx_sq, GFAR_DEV_WEIGHT); - netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx_sq, 2); } else { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, gfar_poll_rx, GFAR_DEV_WEIGHT); - netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx, 2); } } @@ -1837,7 +1834,7 @@ static void gfar_configure_serdes(struct net_device *dev) * several seconds for it to come back. */ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1852,7 +1849,7 @@ static void gfar_configure_serdes(struct net_device *dev) BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 650f7888e32b..cbddbe2d0429 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1385,7 +1385,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1705,7 +1705,7 @@ static void uec_configure_serdes(struct net_device *dev) * several seconds for it to come back. */ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); return; } @@ -1716,7 +1716,7 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); - put_device(&tbiphy->dev); + put_device(&tbiphy->mdio.dev); } /* Configure the PHY for dev. 
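The mii-bitbang, mii-fec and fsl_pq_mdio hunks above all drop the driver-owned mii_bus->irq allocation: the irq table is now embedded in struct mii_bus and the core initializes every address to PHY_POLL, so a bus driver only fills in its accessors and registers the bus. A minimal probe following that convention could look like the sketch below; it is an illustration only, and the example_mdio_* names are placeholders rather than code from this series.

#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

/* Placeholder accessors: a real driver would access its MDIO registers here */
static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;
}

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;
	int ret;

	bus = mdiobus_alloc();
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);

	/* No bus->irq allocation needed: the core presets all addresses to PHY_POLL */
	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		mdiobus_free(bus);
		return ret;
	}

	platform_set_drvdata(pdev, bus);
	return 0;
}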
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cec95ac8687d..6ca94dc3dda3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -35,7 +35,7 @@ #include <linux/phy.h> #include <linux/types.h> -#define HNAE_DRIVER_VERSION "1.3.0" +#define HNAE_DRIVER_VERSION "2.0" #define HNAE_DRIVER_NAME "hns" #define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation." #define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver" @@ -63,6 +63,7 @@ do { \ #define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0') #define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0') +#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1) #define AE_NAME_SIZE 16 /* some said the RX and TX RCB format should not be the same in the future. But @@ -144,23 +145,61 @@ enum hnae_led_state { #define HNS_RXD_ASID_S 24 #define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S) +#define HNSV2_TXD_BUFNUM_S 0 +#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S) +#define HNSV2_TXD_RI_B 1 +#define HNSV2_TXD_L4CS_B 2 +#define HNSV2_TXD_L3CS_B 3 +#define HNSV2_TXD_FE_B 4 +#define HNSV2_TXD_VLD_B 5 + +#define HNSV2_TXD_TSE_B 0 +#define HNSV2_TXD_VLAN_EN_B 1 +#define HNSV2_TXD_SNAP_B 2 +#define HNSV2_TXD_IPV6_B 3 +#define HNSV2_TXD_SCTP_B 4 + /* hardware spec ring buffer format */ struct __packed hnae_desc { __le64 addr; union { struct { - __le16 asid_bufnum_pid; + union { + __le16 asid_bufnum_pid; + __le16 asid; + }; __le16 send_size; - __le32 flag_ipoffset; - __le32 reserved_3[4]; + union { + __le32 flag_ipoffset; + struct { + __u8 bn_pid; + __u8 ra_ri_cs_fe_vld; + __u8 ip_offset; + __u8 tse_vlan_snap_v6_sctp_nth; + }; + }; + __le16 mss; + __u8 l4_len; + __u8 reserved1; + __le16 paylen; + __u8 vmid; + __u8 qid; + __le32 reserved2[2]; } tx; struct { __le32 ipoff_bnum_pid_flag; __le16 pkt_len; __le16 size; - __le32 vlan_pri_asid; - __le32 reserved_2[3]; + union { + __le32 vlan_pri_asid; + struct { + __le16 asid; + __le16 vlan_cfi_pri; + }; + }; + __le32 rss_hash; + __le32 reserved_1[2]; } rx; }; }; @@ -302,7 +341,8 @@ struct hnae_queue { void __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ - struct hnae_ring rx_ring, tx_ring; + struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; + struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp; struct hnae_handle *handle; }; @@ -435,6 +475,7 @@ struct hnae_ae_ops { int (*set_mac_addr)(struct hnae_handle *handle, void *p); int (*set_mc_addr)(struct hnae_handle *handle, void *addr); int (*set_mtu)(struct hnae_handle *handle, int new_mtu); + void (*set_tso_stats)(struct hnae_handle *handle, int enable); void (*update_stats)(struct hnae_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae_handle *handle, u64 *data); @@ -446,6 +487,12 @@ struct hnae_ae_ops { enum hnae_led_state status); void (*get_regs)(struct hnae_handle *handle, void *data); int (*get_regs_len)(struct hnae_handle *handle); + u32 (*get_rss_key_size)(struct hnae_handle *handle); + u32 (*get_rss_indir_size)(struct hnae_handle *handle); + int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); }; struct hnae_ae_dev { @@ -551,11 +598,9 @@ static inline void hnae_replace_buffer(struct hnae_ring *ring, int i, struct hnae_desc_cb *res_cb) { struct hnae_buf_ops *bops = ring->q->handle->bops; - struct hnae_desc_cb tmp_cb = 
ring->desc_cb[i]; bops->unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - *res_cb = tmp_cb; ring->desc[i].addr = (__le64)ring->desc_cb[i].dma; ring->desc[i].rx.ipoff_bnum_pid_flag = 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 1a16c0307b47..522b264866b4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -252,7 +252,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) if (mac_cb->mac_type != HNAE_PORT_SERVICE) return 0; - ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE); + ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true); if (ret) { dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -261,7 +261,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) } ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM, - mac_addr, ENABLE); + mac_addr, true); if (ret) dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -277,12 +277,19 @@ static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu) return hns_mac_set_mtu(mac_cb, new_mtu); } +static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + hns_ppe_set_tso_enable(ppe_cb, enable); +} + static int hns_ae_start(struct hnae_handle *handle) { int ret; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); - ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE); + ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); if (ret) return ret; @@ -309,7 +316,7 @@ void hns_ae_stop(struct hnae_handle *handle) hns_ae_ring_enable_all(handle, 0); - (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE); + (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); } static void hns_ae_reset(struct hnae_handle *handle) @@ -334,12 +341,30 @@ void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) else flag = RCB_INT_FLAG_RX; - hns_rcb_int_clr_hw(ring->q, flag); hns_rcb_int_ctrl_hw(ring->q, flag, mask); } +static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) +{ + u32 flag; + + if (is_tx_ring(ring)) + flag = RCB_INT_FLAG_TX; + else + flag = RCB_INT_FLAG_RX; + + hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); +} + static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) { + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev); + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + else + hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + hns_rcb_start(queue, val); } @@ -730,6 +755,53 @@ int hns_ae_get_regs_len(struct hnae_handle *handle) return total_num; } +static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_KEY_SIZE; +} + +static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_IND_TBL_SIZE; +} + +static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* currently we support only one type of hash function i.e. 
Toep hash */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* get the RSS Key required by the user */ + if (key) + memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); + + /* update the current hash->queue mappings from the shadow RSS table */ + memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE); + + return 0; +} + +static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* set the RSS Hash Key if specififed by the user */ + if (key) + hns_ppe_set_rss_key(ppe_cb, (int *)key); + + /* update the shadow RSS table with user specified qids */ + memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE); + + /* now update the hardware */ + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + + return 0; +} + static struct hnae_ae_ops hns_dsaf_ops = { .get_handle = hns_ae_get_handle, .put_handle = hns_ae_put_handle, @@ -758,19 +830,34 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_mc_addr = hns_ae_set_multicast_one, .set_mtu = hns_ae_set_mtu, .update_stats = hns_ae_update_stats, + .set_tso_stats = hns_ae_set_tso_stats, .get_stats = hns_ae_get_stats, .get_strings = hns_ae_get_strings, .get_sset_count = hns_ae_get_sset_count, .update_led_status = hns_ae_update_led_status, .set_led_id = hns_ae_cpld_set_led_id, .get_regs = hns_ae_get_regs, - .get_regs_len = hns_ae_get_regs_len + .get_regs_len = hns_ae_get_regs_len, + .get_rss_key_size = hns_ae_get_rss_key_size, + .get_rss_indir_size = hns_ae_get_rss_indir_size, + .get_rss = hns_ae_get_rss, + .set_rss = hns_ae_set_rss }; int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + switch (dsaf_dev->dsaf_ver) { + case AE_VERSION_1: + hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq; + break; + case AE_VERSION_2: + hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq; + break; + default: + break; + } ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 026b38676cba..5ef0e96e918a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -283,7 +283,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, } int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en) + u32 port_num, char *addr, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -295,7 +295,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -368,7 +368,7 @@ static void hns_mac_param_get(struct mac_params *param, *retuen 0 - success , negative --fail */ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, - u32 port_num, u16 vlan_id, u8 en) + u32 port_num, u16 vlan_id, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -386,7 +386,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -403,7 +403,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, 
*@en:enable *retuen 0 - success , negative --fail */ -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -427,7 +427,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) return ret; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -648,7 +648,7 @@ static int hns_mac_init_ex(struct hns_mac_cb *mac_cb) hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex); - ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE); + ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, true); if (ret) goto free_mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 7da95a7581f9..0b052191d751 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -425,8 +425,8 @@ void mac_adjust_link(struct net_device *net_dev); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en); -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en); + u32 port_num, char *addr, bool enable); +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); void hns_mac_start(struct hns_mac_cb *mac_cb); void hns_mac_stop(struct hns_mac_cb *mac_cb); int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index b674414a4d72..1c33bd06bd5c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -38,10 +38,10 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) const char *name, *mode_str; struct device_node *np = dsaf_dev->dev->of_node; - if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2")) - dsaf_dev->dsaf_ver = AE_VERSION_2; - else + if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; + else + dsaf_dev->dsaf_ver = AE_VERSION_2; ret = of_property_read_string(np, "dsa_name", &name); if (ret) { @@ -274,6 +274,8 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, } } +#define HNS_DSAF_SBM_NUM(dev) \ + (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM) /** * hns_dsaf_sbm_cfg - config sbm * @dsaf_id: dsa fabric id @@ -283,7 +285,7 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) u32 o_sbm_cfg; u32 i; - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { o_sbm_cfg = dsaf_read_dev(dsaf_dev, DSAF_SBM_CFG_REG_0_REG + 0x80 * i); dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1); @@ -304,13 +306,19 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) u32 reg; u32 read_cnt; - for (i = 0; i < DSAF_SBM_NUM; i++) { + /* validate configure by setting SBM_CFG_MIB_EN bit from 0 to 1. 
*/ + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { + reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; + dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0); + } + + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1); } /* waitint for all sbm enable finished */ - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { read_cnt = 0; reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; do { @@ -338,83 +346,156 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) */ static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { - u32 o_sbm_bp_cfg0; - u32 o_sbm_bp_cfg1; - u32 o_sbm_bp_cfg2; - u32 o_sbm_bp_cfg3; + u32 o_sbm_bp_cfg; u32 reg; u32 i; /* XGE */ for (i = 0; i < DSAF_XGE_NUM; i++) { reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 104); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); - dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128); - 
dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 10); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* RoCEE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 2); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } +} + +static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) +{ + u32 o_sbm_bp_cfg; + u32 reg; + u32 i; + + /* XGE */ + for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) { + reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + /* for no enable pfc mode */ + reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 
192); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } + + /* PPE */ + reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + /* RoCEE */ + for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { + reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } @@ -985,11 +1066,38 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) else tc_cfg = HNS_DSAF_I8TC_CFG; + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + for (i = 0; i < DSAF_INODE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, + i % DSAF_XGE_NUM); + } + } else { + for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, 0); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT1_NUM_M, + DSAFV2_INODE_IN_PORT1_NUM_S, 1); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT2_NUM_M, + DSAFV2_INODE_IN_PORT2_NUM_S, 2); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT3_NUM_M, + DSAFV2_INODE_IN_PORT3_NUM_S, 3); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT4_NUM_M, + DSAFV2_INODE_IN_PORT4_NUM_S, 4); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT5_NUM_M, + DSAFV2_INODE_IN_PORT5_NUM_S, 5); + } + } for (i = 0; i < DSAF_INODE_NUM; i++) { - reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; - dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M, - DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM); - reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i; dsaf_write_dev(dsaf_dev, reg, tc_cfg); } @@ -1002,10 +1110,17 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) { u32 flag; + u32 finish_msk; u32 cnt = 0; int ret; - hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAF_SRAM_INIT_OVER_M; + } else { + hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAFV2_SRAM_INIT_OVER_M; + } /* enable sbm chanel, disable sbm chanel shcut function*/ hns_dsaf_sbm_cfg(dsaf_dev); @@ -1024,11 +1139,13 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) do { usleep_range(200, 210);/*udelay(200);*/ - flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG); + flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG, + finish_msk, DSAF_SRAM_INIT_OVER_S); cnt++; - } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT); + } while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) && + cnt < DSAF_CFG_READ_CNT); - if (flag != DSAF_SRAM_INIT_FINISH_FLAG) { + if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) { dev_err(dsaf_dev->dev, "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n", dsaf_dev->ae_dev.name, flag, cnt); @@ -2011,7 +2128,7 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); /* dsaf inode 
registers */ - for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[232 + i] = dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index b2b93484995c..31c312f9826e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -19,24 +19,20 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" -#define ENABLE (0x1) -#define DISABLE (0x0) +#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 -#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000) +#define DSAF_BASE_INNER_PORT_NUM 127/* mac tbl qid*/ -#define DSAF_BASE_INNER_PORT_NUM (127) /* mac tbl qid*/ +#define DSAF_MAX_CHIP_NUM 2 /*max 2 chips */ -#define DSAF_MAX_CHIP_NUM (2) /*max 2 chips */ +#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE 22 -#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22) +#define HNS_DSAF_MAX_DESC_CNT 1024 +#define HNS_DSAF_MIN_DESC_CNT 16 -#define HNS_DSAF_MAX_DESC_CNT (1024) -#define HNS_DSAF_MIN_DESC_CNT (16) +#define DSAF_INVALID_ENTRY_IDX 0xffff -#define DSAF_INVALID_ENTRY_IDX (0xffff) - -#define DSAF_CFG_READ_CNT (30) -#define DSAF_SRAM_INIT_FINISH_FLAG (0xff) +#define DSAF_CFG_READ_CNT 30 #define MAC_NUM_OCTETS_PER_ADDR 6 @@ -274,10 +270,6 @@ struct dsaf_device { struct device *dev; struct hnae_ae_dev ae_dev; - void *priv; - - int virq[DSAF_IRQ_NUM]; - u8 __iomem *sc_base; u8 __iomem *sds_base; u8 __iomem *ppe_base; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 523e9b83d304..607c3be42241 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -149,7 +149,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) if (port < DSAF_SERVICE_NW_NUM) { reg_val_1 = 0x1 << port; - reg_val_2 = 0x1041041 << port; + /* there is difference between V1 and V2 in register.*/ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + reg_val_2 = 0x1041041 << port; + else + reg_val_2 = 0x2082082 << port; if (val == 0) { dsaf_write_reg(dsaf_dev->sc_base, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 67f33f185a44..f302ef9073c6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -19,6 +19,48 @@ #include "hns_dsaf_ppe.h" +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value) +{ + dsaf_set_dev_bit(ppe_cb, PPEV2_CFG_TSO_EN_REG, 0, !!value); +} + +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]) +{ + int key_item = 0; + + for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++) + dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4, + rss_key[key_item]); +} + +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]) +{ + int i; + int reg_value; + + for (i = 0; i < (HNS_PPEV2_RSS_IND_TBL_SIZE / 4); i++) { + reg_value = dsaf_read_dev(ppe_cb, + PPEV2_INDRECTION_TBL_REG + i * 0x4); + + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N0_M, + PPEV2_CFG_RSS_TBL_4N0_S, + rss_tab[i * 4 + 0] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N1_M, + PPEV2_CFG_RSS_TBL_4N1_S, + rss_tab[i * 4 + 1] & 0x1F); + dsaf_set_field(reg_value, 
PPEV2_CFG_RSS_TBL_4N2_M, + PPEV2_CFG_RSS_TBL_4N2_S, + rss_tab[i * 4 + 2] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N3_M, + PPEV2_CFG_RSS_TBL_4N3_S, + rss_tab[i * 4 + 3] & 0x1F); + dsaf_write_dev( + ppe_cb, PPEV2_INDRECTION_TBL_REG + i * 0x4, reg_value); + } +} + static void __iomem *hns_ppe_common_get_ioaddr( struct ppe_common_cb *ppe_common) { @@ -134,6 +176,11 @@ static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb) PPE_CNT_CLR_CE_B, 1); } +static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en) +{ + dsaf_write_dev(ppe_cb, PPEV2_VLAN_STRIP_EN_REG, en); +} + /** * hns_ppe_checksum_hw - set ppe checksum caculate * @ppe_device: ppe device @@ -266,13 +313,17 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) /** * ppe_init_hw - init ppe - * @ppe_device: ppe device + * @ppe_cb: ppe device */ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) { struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb; u32 port = ppe_cb->port; struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev; + int i; + + /* get default RSS key */ + netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); hns_ppe_srst_by_port(dsaf_dev, port, 0); mdelay(10); @@ -285,8 +336,21 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); else hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); + hns_ppe_checksum_hw(ppe_cb, 0xffffffff); hns_ppe_cnt_clr_ce(ppe_cb); + + if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_ppe_set_vlan_strip(ppe_cb, 0); + + /* set default RSS key in h/w */ + hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); + + /* Set default indrection table in h/w */ + for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++) + ppe_cb->rss_indir_table[i] = i; + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + } } /** @@ -341,13 +405,13 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index) if (ret) return; + for (i = 0; i < ppe_common->ppe_num; i++) + hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]); if (ret) return; - for (i = 0; i < ppe_common->ppe_num; i++) - hns_ppe_init_hw(&ppe_common->ppe_cb[i]); - hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 4894f9a0d39f..0f5cb6962acf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -25,15 +25,24 @@ #define ETH_PPE_DUMP_NUM 576 #define ETH_PPE_STATIC_NUM 12 + +#define HNS_PPEV2_RSS_IND_TBL_SIZE 256 +#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */ +#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32)) + enum ppe_qid_mode { - PPE_QID_MODE0 = 0, /* fixed queue id mode */ - PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ - PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ - PPE_QID_MODE3, /* switch:4TC/8TAG non switch:2Port/64VM */ - PPE_QID_MODE4, /* switch:8VM/16TAG non switch:2Port/16VM/4TC */ - PPE_QID_MODE5, /* non switch:6Port/16TAG */ - PPE_QID_MODE6, /* non switch:6Port/2VM/8TC */ - PPE_QID_MODE7, /* non switch:2Port/8VM/8TC */ + PPE_QID_MODE0 = 0, /* fixed queue id mode */ + PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ + PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ + PPE_QID_MODE3, /* switch:4TC/8RSS non switch:2Port/64VM */ + PPE_QID_MODE4, /* switch:8VM/16RSS non switch:2Port/16VM/4TC */ + 
PPE_QID_MODE5, /* switch:16VM/8TC non switch:6Port/16RSS */ + PPE_QID_MODE6, /* switch:32VM/4RSS non switch:6Port/2VM/8TC */ + PPE_QID_MODE7, /* switch:32RSS non switch:2Port/8VM/8TC */ + PPE_QID_MODE8, /* switch:6VM/4TC/4RSS non switch:2Port/16VM/4RSS */ + PPE_QID_MODE9, /* non switch:2Port/32VM/2RSS */ + PPE_QID_MODE10, /* non switch:2Port/32RSS */ + PPE_QID_MODE11, /* non switch:2Port/4TC/16RSS */ }; enum ppe_port_mode { @@ -72,6 +81,8 @@ struct hns_ppe_cb { u8 port; /* port id in dsaf */ void __iomem *io_base; int virq; + u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ + u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ }; struct ppe_common_cb { @@ -102,4 +113,9 @@ void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data); void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data); void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data); +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value); +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]); +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]); #endif /* _HNS_DSAF_PPE_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 4db32c62f062..d2263c72bd8a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -136,19 +136,37 @@ void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag) { - u32 clr = 1; - if (flag & RCB_INT_FLAG_TX) { - dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1); } if (flag & RCB_INT_FLAG_RX) { - dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1); } } +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) +{ + u32 int_mask_en = !!mask; + + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en); +} + +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag) +{ + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1); +} + /** *hns_rcb_ring_enable_hw - enable ring *@ring: rcb ring @@ -193,6 +211,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, @@ -204,6 +223,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, @@ -232,9 +252,6 @@ void hns_rcb_init_hw(struct ring_pair_cb *ring) static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, u32 port_idx, u32 desc_cnt) { - if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) - 
port_idx = 0; - dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4, desc_cnt); } @@ -249,8 +266,6 @@ static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames) { - if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) - port_idx = 0; if (coalesced_frames >= rcb_common->desc_num || coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) return -EINVAL; @@ -354,6 +369,9 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, HNS_RCB_COMMON_ENDIAN); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); + return 0; } @@ -387,19 +405,23 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) struct rcb_common_cb *rcb_common; struct ring_pair_cb *ring_pair_cb; u32 buf_size; - u16 desc_num; - int irq_idx; + u16 desc_num, mdnum_ppkt; + bool irq_idx, is_ver1; ring_pair_cb = container_of(q, struct ring_pair_cb, q); + is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver); if (ring_type == RX_RING) { ring = &q->rx_ring; ring->io_base = ring_pair_cb->q.io_base; irq_idx = HNS_RCB_IRQ_IDX_RX; + mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; + mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : + HNS_RCBV2_RING_MAX_TXBD_PER_PKT; } rcb_common = ring_pair_cb->rcb_common; @@ -414,7 +436,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) ring->buf_size = buf_size; ring->desc_num = desc_num; - ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT; + ring->max_desc_num_per_pkt = mdnum_ppkt; ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE; ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE; ring->next_to_use = 0; @@ -445,14 +467,22 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) return port; } +#define SERVICE_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_OFFSET(v1) \ + ((v1) ? 
HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) { int comm_index = rcb_common->comm_index; + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) - return HNS_SERVICE_RING_IRQ_IDX; + return SERVICE_RING_IRQ_IDX(is_ver1); else - return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2; + return DEBUG_RING_IRQ_IDX(is_ver1) + + (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); } #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ @@ -468,6 +498,9 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) u32 ring_num = rcb_common->ring_num; int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); struct device_node *np = rcb_common->dsaf_dev->dev->of_node; + struct platform_device *pdev = + to_platform_device(rcb_common->dsaf_dev->dev); + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); for (i = 0; i < ring_num; i++) { ring_pair_cb = &rcb_common->ring_pair_cb[i]; @@ -477,10 +510,12 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) ring_pair_cb->q.io_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = + is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : + platform_get_irq(pdev, base_irq_idx + i * 3 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = + is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : + platform_get_irq(pdev, base_irq_idx + i * 3); ring_pair_cb->q.phy_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); hns_rcb_ring_pair_get_cfg(ring_pair_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 3a2afe2dd8bb..29041b18741a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -26,6 +26,8 @@ struct rcb_common_cb; #define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN #define HNS_RCB_DEBUG_NW_ENGINE_NUM 1 #define HNS_RCB_RING_MAX_BD_PER_PKT 3 +#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3 +#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8 #define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU #define HNS_RCB_RING_MAX_PENDING_BD 1024 @@ -106,13 +108,17 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); -void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, u16 *max_vfn, u16 *max_q_per_vf); +void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); + void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val); void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag); void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable); +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask); +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); + void hns_rcb_init_hw(struct ring_pair_cb *ring); void hns_rcb_reset_ring_hw(struct hnae_queue *q); void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index bdbd80423b17..5d1b746e141d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,21 +10,12 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ -#define HNS_GE_FIFO_ERR_INTNUM 8 -#define HNS_XGE_ERR_INTNUM 6 -#define HNS_RCB_COMM_ERR_INTNUM 12 -#define HNS_PPE_TNL_ERR_INTNUM 8 -#define HNS_DSAF_EVENT_INTNUM 21 -#define HNS_DEBUG_RING_INTNUM 4 -#define HNS_SERVICE_RING_INTNUM 256 - -#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\ - HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\ - HNS_DSAF_EVENT_INTNUM) -#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\ - HNS_DEBUG_RING_INTNUM) - -#define DSAF_IRQ_NUM 18 +#define HNS_DEBUG_RING_IRQ_IDX 55 +#define HNS_SERVICE_RING_IRQ_IDX 59 +#define HNS_DEBUG_RING_IRQ_OFFSET 2 +#define HNSV2_DEBUG_RING_IRQ_IDX 409 +#define HNSV2_SERVICE_RING_IRQ_IDX 25 +#define HNSV2_DEBUG_RING_IRQ_OFFSET 9 #define DSAF_MAX_PORT_NUM_PER_CHIP 8 #define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 @@ -39,9 +30,15 @@ #define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM +#define DSAF_PORT_TYPE_NUM 3 #define DSAF_NODE_NUM 18 #define DSAF_XOD_BIG_NUM DSAF_NODE_NUM #define DSAF_SBM_NUM DSAF_NODE_NUM +#define DSAFV2_SBM_NUM 8 +#define DSAFV2_SBM_XGE_CHN 6 +#define DSAFV2_SBM_PPE_CHN 1 +#define DASFV2_ROCEE_CRD_NUM 8 + #define DSAF_VOQ_NUM DSAF_NODE_NUM #define DSAF_INODE_NUM DSAF_NODE_NUM #define DSAF_XOD_NUM 8 @@ -52,56 +49,56 @@ #define DSAF_TCAM_SUM 512 #define DSAF_LINE_SUM (2048 * 14) -#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C -#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 -#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 -#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 -#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 -#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 -#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C -#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 -#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 -#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 -#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C -#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 -#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 -#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 -#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 -#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 -#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C -#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 -#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 -#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 -#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C -#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 -#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 -#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 -#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C -#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 -#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 -#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 -#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 -#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 -#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C -#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 -#define 
DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 -#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 -#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 -#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 -#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 -#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C -#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 -#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 +#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C +#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 +#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 +#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 +#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 +#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 +#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C +#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 +#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 +#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 +#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C +#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 +#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 +#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 +#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 +#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 +#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C +#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 +#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 +#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 +#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C +#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 +#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 +#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 +#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C +#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 +#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 +#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 +#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 +#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 +#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C +#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 +#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 +#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 +#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 +#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 +#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 +#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C +#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 +#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 /*serdes offset**/ #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG @@ -178,6 +175,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 #define DSAF_SBM_BP_CNT_0_0_REG 0x2018 @@ -319,6 +317,8 @@ #define PPE_CFG_TAG_GEN_REG 0x90 #define PPE_CFG_PARSE_TAG_REG 0x94 #define PPE_CFG_PRO_CHECK_EN_REG 0x98 +#define PPEV2_CFG_TSO_EN_REG 0xA0 +#define PPEV2_VLAN_STRIP_EN_REG 0xAC #define PPE_INTEN_REG 0x100 #define PPE_RINT_REG 0x104 #define PPE_INTSTS_REG 0x108 @@ -351,6 +351,8 @@ #define PPE_ECO0_REG 0x32C #define PPE_ECO1_REG 0x330 #define PPE_ECO2_REG 0x334 
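[Editor's note] The PPEV2 RSS registers defined below pack four queue indices into each 32-bit indirection-table word, using the 5-bit PPEV2_CFG_RSS_TBL_4N[0-3] fields at bit offsets 0, 8, 16 and 24. The driver performs a read-modify-write of the live register via dsaf_set_field()/dsaf_write_dev(); the sketch below merely composes the words from scratch to show the layout, and pack_rss_indir() is an illustrative helper, not part of the patch.

#include <stdint.h>

#define RSS_IND_TBL_SIZE 256	/* HNS_PPEV2_RSS_IND_TBL_SIZE in the patch */

static void pack_rss_indir(const uint32_t tab[RSS_IND_TBL_SIZE],
			   uint32_t regs[RSS_IND_TBL_SIZE / 4])
{
	int i;

	for (i = 0; i < RSS_IND_TBL_SIZE / 4; i++) {
		/* four queue ids per register, 5 bits each, at bits 0/8/16/24 */
		regs[i]  = (tab[i * 4 + 0] & 0x1F) << 0;
		regs[i] |= (tab[i * 4 + 1] & 0x1F) << 8;
		regs[i] |= (tab[i * 4 + 2] & 0x1F) << 16;
		regs[i] |= (tab[i * 4 + 3] & 0x1F) << 24;
	}
}
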
+#define PPEV2_INDRECTION_TBL_REG 0x800 +#define PPEV2_RSS_KEY_REG 0x900 #define RCB_COM_CFG_ENDIAN_REG 0x0 #define RCB_COM_CFG_SYS_FSH_REG 0xC @@ -431,8 +433,10 @@ #define RCB_RING_INTMSK_RXWL_REG 0x000A0 #define RCB_RING_INTSTS_RX_RING_REG 0x000A4 +#define RCBV2_RX_RING_INT_STS_REG 0x000A8 #define RCB_RING_INTMSK_TXWL_REG 0x000AC #define RCB_RING_INTSTS_TX_RING_REG 0x000B0 +#define RCBV2_TX_RING_INT_STS_REG 0x000B4 #define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8 #define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 @@ -678,6 +682,10 @@ #define XGMAC_TRX_CORE_SRST_M 0x2080 +#define DSAF_SRAM_INIT_OVER_M 0xff +#define DSAFV2_SRAM_INIT_OVER_M 0x3ff +#define DSAF_SRAM_INIT_OVER_S 0 + #define DSAF_CFG_EN_S 0 #define DSAF_CFG_TC_MODE_S 1 #define DSAF_CFG_CRC_EN_S 2 @@ -685,6 +693,7 @@ #define DSAF_CFG_MIX_MODE_S 4 #define DSAF_CFG_STP_MODE_S 5 #define DSAF_CFG_LOCA_ADDR_EN_S 6 +#define DSAFV2_CFG_VLAN_TAG_MODE_S 17 #define DSAF_CNT_CLR_CE_S 0 #define DSAF_SNAP_EN_S 1 @@ -707,6 +716,16 @@ #define DSAF_INODE_IN_PORT_NUM_M 7 #define DSAF_INODE_IN_PORT_NUM_S 0 +#define DSAFV2_INODE_IN_PORT1_NUM_M (7ULL << 3) +#define DSAFV2_INODE_IN_PORT1_NUM_S 3 +#define DSAFV2_INODE_IN_PORT2_NUM_M (7ULL << 6) +#define DSAFV2_INODE_IN_PORT2_NUM_S 6 +#define DSAFV2_INODE_IN_PORT3_NUM_M (7ULL << 9) +#define DSAFV2_INODE_IN_PORT3_NUM_S 9 +#define DSAFV2_INODE_IN_PORT4_NUM_M (7ULL << 12) +#define DSAFV2_INODE_IN_PORT4_NUM_S 12 +#define DSAFV2_INODE_IN_PORT5_NUM_M (7ULL << 15) +#define DSAFV2_INODE_IN_PORT5_NUM_S 15 #define HNS_DSAF_I4TC_CFG 0x18688688 #define HNS_DSAF_I8TC_CFG 0x18FAC688 @@ -738,6 +757,33 @@ #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10 #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10) +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S 18 +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 18) + +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + #define DSAF_TBL_TCAM_ADDR_S 0 #define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) @@ -797,6 +843,18 @@ #define PPE_CFG_QID_MODE_CF_QID_MODE_S 8 #define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S) +#define PPEV2_CFG_RSS_TBL_4N0_S 0 +#define PPEV2_CFG_RSS_TBL_4N0_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N0_S) + +#define PPEV2_CFG_RSS_TBL_4N1_S 8 +#define PPEV2_CFG_RSS_TBL_4N1_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N1_S) + +#define PPEV2_CFG_RSS_TBL_4N2_S 16 +#define 
PPEV2_CFG_RSS_TBL_4N2_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N2_S) + +#define PPEV2_CFG_RSS_TBL_4N3_S 24 +#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 08cef0dfb5db..0e30846a24f8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -33,10 +33,105 @@ #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 +#define HNS_BUFFER_SIZE_2048 2048 + +#define BD_MAX_SEND_SIZE 8191 +#define SKB_TMP_LEN(SKB) \ + (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) + +static void fill_v2_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + struct hnae_desc *desc = &ring->desc[ring->next_to_use]; + struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct iphdr *iphdr; + struct ipv6hdr *ipv6hdr; + struct sk_buff *skb; + int skb_tmp_len; + __be16 protocol; + u8 bn_pid = 0; + u8 rrcfv = 0; + u8 ip_offset = 0; + u8 tvsvsn = 0; + u16 mss = 0; + u8 l4_len = 0; + u16 paylen = 0; + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + + /*config bd buffer end */ + hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); + hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + ip_offset = ETH_HLEN; + + if (protocol == htons(ETH_P_8021Q)) { + ip_offset += VLAN_HLEN; + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + + if (skb->protocol == htons(ETH_P_IP)) { + iphdr = ip_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (iphdr->protocol == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1); + ipv6hdr = ipv6_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (ipv6hdr->nexthdr == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } + desc->tx.ip_offset = ip_offset; + desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn; + desc->tx.mss = cpu_to_le16(mss); + desc->tx.l4_len = l4_len; + desc->tx.paylen = cpu_to_le16(paylen); + } + } + + hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end); + + desc->tx.bn_pid = bn_pid; + desc->tx.ra_ri_cs_fe_vld = rrcfv; + + ring_ptr_move_fw(ring, next_to_use); +} static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type) + int buf_num, enum hns_desc_type type, int mtu) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -100,47 +195,129 @@ static void unfill_desc(struct hnae_ring *ring) ring_ptr_move_bw(ring, next_to_use); } -int 
hns_nic_net_xmit_hw(struct net_device *ndev, - struct sk_buff *skb, - struct hns_nic_ring_data *ring_data) +static int hns_nic_maybe_stop_tx( + struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring) { - struct hns_nic_priv *priv = netdev_priv(ndev); - struct device *dev = priv->dev; - struct hnae_ring *ring = ring_data->ring; - struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; int buf_num; - dma_addr_t dma; - int size, next_to_use; - int i, j; - struct sk_buff *new_skb; - - assert(ring->max_desc_num_per_pkt <= ring->desc_num); /* no. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { - if (ring_space(ring) < 1) { - ring->stats.tx_busy++; - goto out_net_tx_busy; - } + if (ring_space(ring) < 1) + return -EBUSY; new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) { - ring->stats.sw_err_cnt++; - netdev_err(ndev, "no memory to xmit!\n"); - goto out_err_tx_ok; - } + if (!new_skb) + return -ENOMEM; dev_kfree_skb_any(skb); - skb = new_skb; + *out_skb = new_skb; buf_num = 1; - assert(skb_shinfo(skb)->nr_frags == 1); } else if (buf_num > ring_space(ring)) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static int hns_nic_maybe_stop_tso( + struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring) +{ + int i; + int size; + int buf_num; + int frag_num; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; + struct skb_frag_struct *frag; + + size = skb_headlen(skb); + buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + } + + if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { + buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + + } else if (ring_space(ring) < buf_num) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static void fill_tso_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + int frag_buf_num; + int sizeoflast; + int k; + + frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + sizeoflast = size % BD_MAX_SEND_SIZE; + sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE; + + /* when the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) + fill_v2_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : BD_MAX_SEND_SIZE, + dma + BD_MAX_SEND_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + buf_num, + (type == DESC_TYPE_SKB && !k) ? 
+ DESC_TYPE_SKB : DESC_TYPE_PAGE, + mtu); +} + +int hns_nic_net_xmit_hw(struct net_device *ndev, + struct sk_buff *skb, + struct hns_nic_ring_data *ring_data) +{ + struct hns_nic_priv *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct hnae_ring *ring = ring_data->ring; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int buf_num; + int seg_num; + dma_addr_t dma; + int size, next_to_use; + int i; + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: ring->stats.tx_busy++; goto out_net_tx_busy; + case -ENOMEM: + ring->stats.sw_err_cnt++; + netdev_err(ndev, "no memory to xmit!\n"); + goto out_err_tx_ok; + default: + break; } + + /* no. of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; next_to_use = ring->next_to_use; /* fill the first part */ @@ -151,11 +328,11 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } - fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num, - DESC_TYPE_SKB); + priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + buf_num, DESC_TYPE_SKB, ndev->mtu); /* fill the fragments */ - for (i = 1; i < buf_num; i++) { + for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); @@ -164,8 +341,9 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_map_frag_fail; } - fill_desc(ring, skb_frag_page(frag), size, dma, - buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE); + priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, buf_num, + DESC_TYPE_PAGE, ndev->mtu); } /*complete translate all packets*/ @@ -182,19 +360,20 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, out_map_frag_fail: - for (j = i - 1; j > 0; j--) { + while (ring->next_to_use != next_to_use) { unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_page(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, - DMA_TO_DEVICE); + if (ring->next_to_use != next_to_use) + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_single(dev, + ring->desc_cb[next_to_use].dma, + ring->desc_cb[next_to_use].length, + DMA_TO_DEVICE); } - unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_single(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, DMA_TO_DEVICE); - out_err_tx_ok: dev_kfree_skb_any(skb); @@ -313,51 +492,110 @@ static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, return max_size; } -static void -hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset) +static void hns_nic_reuse_page(struct sk_buff *skb, int i, + struct hnae_ring *ring, int pull_len, + struct hnae_desc_cb *desc_cb) { + struct hnae_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + /* avoid re-using remote pages,flag default unreuse */ - if (likely(page_to_nid(desc_cb->priv) == 
numa_node_id())) { - /* move offset up to the next cache line */ - desc_cb->page_offset += tsize; + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* if we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; - if (desc_cb->page_offset <= last_offset) { desc_cb->reuse_flag = 1; /* bump ref count on page before it is given*/ get_page(desc_cb->priv); } + return; + } + + /* move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* bump ref count on page before it is given*/ + get_page(desc_cb->priv); } } +static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1; +} + +static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); +} + static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, struct sk_buff **out_skb, int *out_bnum) { struct hnae_ring *ring = ring_data->ring; struct net_device *ndev = ring_data->napi.dev; + struct hns_nic_priv *priv = netdev_priv(ndev); struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; unsigned char *va; - int bnum, length, size, i, truesize, last_offset; + int bnum, length, i; int pull_len; u32 bnum_flag; - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - length = le16_to_cpu(desc->rx.pkt_len); - bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); - bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); - *out_bnum = bnum; + + prefetch(desc); + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; - skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring_data->napi, + HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } + prefetchw(skb->data); + length = le16_to_cpu(desc->rx.pkt_len); + bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); + priv->ops.get_rxd_bnum(bnum_flag, &bnum); + *out_bnum = bnum; + if (length <= HNS_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -380,13 +618,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, 0, desc_cb->priv, - desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); - - hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/ @@ -396,13 +628,8 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, for (i = 1; i < bnum; i++) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, i, desc_cb->priv, - desc_cb->page_offset, - size, truesize); - 
hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, i, ring, 0, desc_cb); ring_ptr_move_fw(ring, next_to_clean); } } @@ -540,20 +767,20 @@ recv: } /* make all data has been write before submit */ - if (clean_count > 0) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); - clean_count = 0; - } - if (recv_pkts < budget) { ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - rmb(); /*complete read rx ring bd number*/ - if (ex_num > 0) { - num += ex_num; + + if (ex_num > clean_count) { + num += ex_num - clean_count; + rmb(); /*complete read rx ring bd number*/ goto recv; } } + /* make all data has been write before submit */ + if (clean_count > 0) + hns_nic_alloc_rx_buffers(ring_data, clean_count); + return recv_pkts; } @@ -642,14 +869,20 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, bytes = 0; pkts = 0; - while (head != ring->next_to_clean) + while (head != ring->next_to_clean) { hns_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ring->next_to_clean]); + } NETIF_TX_UNLOCK(ndev); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); + if (unlikely(priv->link && !netif_carrier_ok(ndev))) + netif_carrier_on(ndev); + if (unlikely(pkts && netif_carrier_ok(ndev) && (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) { /* Make sure that anybody stopping the queue after this @@ -716,6 +949,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) ring_data->ring, 0); ring_data->fini_process(ring_data); + return 0; } return clean_complete; @@ -848,15 +1082,58 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx) napi_disable(&priv->ring_data[idx].napi); } -static int hns_nic_init_irq(struct hns_nic_priv *priv) +static void hns_set_irq_affinity(struct hns_nic_priv *priv) { struct hnae_handle *h = priv->ae_handle; struct hns_nic_ring_data *rd; int i; - int ret; int cpu; cpumask_t mask; + /*diffrent irq banlance for 16core and 32core*/ + if (h->q_num == num_possible_cpus()) { + for (i = 0; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index)) { + cpumask_clear(&mask); + cpu = rd->queue_index; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } else { + for (i = 0; i < h->q_num; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + + for (i = h->q_num; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2 + 1)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2 + 1; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } +} + +static int hns_nic_init_irq(struct hns_nic_priv *priv) +{ + struct hnae_handle *h = priv->ae_handle; + struct hns_nic_ring_data *rd; + int i; + int ret; + for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; @@ -878,16 +1155,11 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) } disable_irq(rd->ring->irq); rd->ring->irq_init_flag = RCB_IRQ_INITED; - - /*set cpu affinity*/ - if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); - cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); - irq_set_affinity_hint(rd->ring->irq, &mask); - } } + /*set cpu affinity*/ + hns_set_irq_affinity(priv); + return 0; } @@ -1136,6 
+1408,51 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu) return ret; } +static int hns_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + switch (priv->enet_ver) { + case AE_VERSION_1: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + netdev_info(netdev, "enet v1 do not support tso!\n"); + break; + default: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* The chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + h->dev->ops->set_tso_stats(h, 0); + } + break; + } + netdev->features = features; + return 0; +} + +static netdev_features_t hns_nic_fix_features( + struct net_device *netdev, netdev_features_t features) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + + switch (priv->enet_ver) { + case AE_VERSION_1: + features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_FILTER); + break; + default: + break; + } + return features; +} + /** * nic_set_multicast_list - set mutl mac address * @netdev: net device @@ -1231,6 +1548,8 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_set_mac_address = hns_nic_net_set_mac_address, .ndo_change_mtu = hns_nic_change_mtu, .ndo_do_ioctl = hns_nic_do_ioctl, + .ndo_set_features = hns_nic_set_features, + .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = hns_nic_poll_controller, @@ -1315,22 +1634,26 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) return; hns_nic_dump(priv); - netdev_info(priv->netdev, "Reset %s port\n", - (type == HNAE_PORT_DEBUG ? "debug" : "business")); + netdev_info(priv->netdev, "try to reset %s port!\n", + (type == HNAE_PORT_DEBUG ? 
"debug" : "service")); rtnl_lock(); /* put off any impending NetWatchDogTimeout */ priv->netdev->trans_start = jiffies; - if (type == HNAE_PORT_DEBUG) + if (type == HNAE_PORT_DEBUG) { hns_nic_net_reinit(priv->netdev); + } else { + netif_carrier_off(priv->netdev); + netif_tx_disable(priv->netdev); + } rtnl_unlock(); } /* for doing service complete*/ static void hns_nic_service_event_complete(struct hns_nic_priv *priv) { - assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); + WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); smp_mb__before_atomic(); clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); @@ -1435,8 +1758,9 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) for (i = 0; i < h->q_num * 2; i++) { netif_napi_del(&priv->ring_data[i].napi); if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { - irq_set_affinity_hint(priv->ring_data[i].ring->irq, - NULL); + (void)irq_set_affinity_hint( + priv->ring_data[i].ring->irq, + NULL); free_irq(priv->ring_data[i].ring->irq, &priv->ring_data[i]); } @@ -1446,6 +1770,31 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) kfree(priv->ring_data); } +static void hns_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + if (AE_IS_VER1(priv->enet_ver)) { + priv->ops.fill_desc = fill_desc; + priv->ops.get_rxd_bnum = get_rx_desc_bnum; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } else { + priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* This chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } + } +} + static int hns_nic_try_get_ae(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); @@ -1473,6 +1822,8 @@ static int hns_nic_try_get_ae(struct net_device *ndev) goto out_init_ring_data; } + hns_nic_set_priv_ops(ndev); + ret = register_netdev(ndev); if (ret) { dev_err(priv->dev, "probe register netdev fail!\n"); @@ -1524,10 +1875,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - if (of_device_is_compatible(node, "hisilicon,hns-nic-v2")) - priv->enet_ver = AE_VERSION_2; - else + if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) priv->enet_ver = AE_VERSION_1; + else + priv->enet_ver = AE_VERSION_2; ret = of_property_read_string(node, "ae-name", &priv->ae_name); if (ret) @@ -1543,6 +1894,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) ndev->priv_flags |= IFF_UNICAST_FLT; ndev->netdev_ops = &hns_nic_netdev_ops; hns_ethtool_set_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; @@ -1550,6 +1902,17 @@ static int hns_nic_dev_probe(struct platform_device *pdev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; + switch (priv->enet_ver) { + case AE_VERSION_2: + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; + break; + default: + break; + } + SET_NETDEV_DEV(ndev, dev); if (!dma_set_mask_and_coherent(dev, 
DMA_BIT_MASK(64))) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index dae0ed19ac6d..4b75270f014e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -40,6 +40,16 @@ struct hns_nic_ring_data { void (*fini_process)(struct hns_nic_ring_data *); }; +/* compatible the difference between two versions */ +struct hns_nic_ops { + void (*fill_desc)(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hnae_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + struct hns_nic_priv { const char *ae_name; u32 enet_ver; @@ -51,6 +61,8 @@ struct hns_nic_priv { struct device *dev; struct hnae_handle *ae_handle; + struct hns_nic_ops ops; + /* the cb for nic to manage the ring buffer, the first half of the * array is for tx_ring and vice versa for the second half */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a0332129970b..3df22840fcd1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -11,7 +11,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> - #include "hns_enet.h" #define HNS_PHY_PAGE_MDIX 0 @@ -72,24 +71,22 @@ static void hns_get_mdix_mode(struct net_device *net_dev, struct hns_nic_priv *priv = netdev_priv(net_dev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev || !phy_dev->bus) { + if (!phy_dev || !phy_dev->mdio.bus) { cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; return; } - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_MDIX); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG); + retval = phy_read(phy_dev, HNS_PHY_CSC_REG); mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG); + retval = phy_read(phy_dev, HNS_PHY_CSS_REG); mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B); is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); switch (mdix_ctrl) { case 0x0: @@ -254,53 +251,36 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) if (en) { /* speed : 1000M */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, 2); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 21, 0x1046); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); + phy_write(phy_dev, 21, 0x1046); /* Force Master */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 9, 0x1F00); + phy_write(phy_dev, 9, 0x1F00); /* Soft-reset */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); + phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); + phy_write(phy_dev, 0, 0x9140); + phy_write(phy_dev, 22, 0xFA); /* Default is 0x0400 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x418); + phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ - 
(void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x20C); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); + phy_write(phy_dev, 7, 0x20C); + phy_write(phy_dev, 22, 0); /* Enable MAC loop-back */ - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x400); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x200); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); - - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, 1, 0x400); + phy_write(phy_dev, 7, 0x200); + phy_write(phy_dev, 22, 0); + + val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; } @@ -667,6 +647,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN); + drvinfo->eedump_len = 0; } /** @@ -1018,16 +999,9 @@ int hns_phy_led_set(struct net_device *netdev, int value) struct hns_nic_priv *priv = netdev_priv(netdev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev->bus) { - netdev_err(netdev, "phy_dev->bus is null!\n"); - return -EINVAL; - } - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG, - value); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); + retval = phy_write(phy_dev, HNS_LED_FC_REG, value); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); if (retval) { netdev_err(netdev, "mdiobus_write fail !\n"); return retval; @@ -1052,19 +1026,15 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) if (phy_dev) switch (state) { case ETHTOOL_ID_ACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus, - phy_dev->addr, - HNS_LED_FC_REG); + priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG); - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; return 2; @@ -1079,20 +1049,18 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) return ret; break; case ETHTOOL_ID_INACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_LED_FC_REG, priv->phy_led_val); + ret = phy_write(phy_dev, HNS_LED_FC_REG, + priv->phy_led_val); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; 
break; @@ -1187,6 +1155,95 @@ static int hns_nic_nway_reset(struct net_device *netdev) return ret; } +static u32 +hns_get_rss_key_size(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_key_size(priv->ae_handle); + + return ret; +} + +static u32 +hns_get_rss_indir_size(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_indir_size(priv->ae_handle); + + return ret; +} + +static int +hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + if (!indir) + return 0; + + ret = ops->get_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + +static int +hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + /* currently hfunc can only be Toeplitz hash */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + ret = ops->set_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + static struct ethtool_ops hns_ethtool_ops = { .get_drvinfo = hns_nic_get_drvinfo, .get_link = hns_nic_get_link, @@ -1206,6 +1263,10 @@ static struct ethtool_ops hns_ethtool_ops = { .get_regs_len = hns_get_regs_len, .get_regs = hns_get_regs, .nway_reset = hns_nic_nway_reset, + .get_rxfh_key_size = hns_get_rss_key_size, + .get_rxfh_indir_size = hns_get_rss_indir_size, + .get_rxfh = hns_get_rss, + .set_rxfh = hns_set_rss, }; void hns_ethtool_set_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 37491c85bc42..58c96c412fe8 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -463,11 +463,6 @@ static int hns_mdio_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, - sizeof(int), GFP_KERNEL); - if (!new_bus->irq) - return -ENOMEM; - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index ae6e30d39f0f..1d5c3e16d8f4 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d) } #ifdef CONFIG_EISA -static int __init hp100_eisa_probe (struct device *gendev) +static int hp100_eisa_probe(struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); struct eisa_device 
*edev = to_eisa_device(gendev); diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig index 99c1cebd002d..37dceabf8861 100644 --- a/drivers/net/ethernet/ibm/Kconfig +++ b/drivers/net/ethernet/ibm/Kconfig @@ -37,4 +37,14 @@ config EHEA To compile the driver as a module, choose M here. The module will be called ehea. +config IBMVNIC + tristate "IBM Virtual NIC support" + depends on PPC_PSERIES + ---help--- + This driver supports Virtual NIC adapters on IBM i and IBM System p + systems. + + To compile this driver as a module, choose M here. The module will + be called ibmvnic. + endif # NET_VENDOR_IBM diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile index 2f04e71a5926..447865c8b632 100644 --- a/drivers/net/ethernet/ibm/Makefile +++ b/drivers/net/ethernet/ibm/Makefile @@ -3,5 +3,6 @@ # obj-$(CONFIG_IBMVETH) += ibmveth.o +obj-$(CONFIG_IBMVNIC) += ibmvnic.o obj-$(CONFIG_IBM_EMAC) += emac/ obj-$(CONFIG_EHEA) += ehea/ diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 7af870a3c549..335417b4756b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -169,7 +169,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) if (!pool->free_map) return -1; - pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); + pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL); if (!pool->dma_addr) { kfree(pool->free_map); pool->free_map = NULL; @@ -187,8 +187,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) return -1; } - memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); - for (i = 0; i < pool->size; ++i) pool->free_map[i] = i; @@ -763,7 +761,7 @@ static netdev_features_t ibmveth_fix_features(struct net_device *dev, */ if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; return features; } @@ -928,7 +926,8 @@ static int ibmveth_set_features(struct net_device *dev, rc1 = ibmveth_set_csum_offload(dev, rx_csum); if (rc1 && !adapter->rx_csum) dev->features = - features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + features & ~(NETIF_F_CSUM_MASK | + NETIF_F_RXCSUM); } if (large_send != adapter->large_send) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c new file mode 100644 index 000000000000..7d6570843723 --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -0,0 +1,3585 @@ +/**************************************************************************/ +/* */ +/* IBM System i and System p Virtual NIC Device Driver */ +/* Copyright (C) 2014 IBM Corp. */ +/* Santiago Leon (santi_leon@yahoo.com) */ +/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ +/* John Allen (jallen@linux.vnet.ibm.com) */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. 
*/ +/* */ +/* This module contains the implementation of a virtual ethernet device */ +/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */ +/* option of the RS/6000 Platform Architecture to interface with virtual */ +/* ethernet NICs that are presented to the partition by the hypervisor. */ +/* */ +/* Messages are passed between the VNIC driver and the VNIC server using */ +/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */ +/* issue and receive commands that initiate communication with the server */ +/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */ +/* are used by the driver to notify the server that a packet is */ +/* ready for transmission or that a buffer has been added to receive a */ +/* packet. Subsequently, sCRQs are used by the server to notify the */ +/* driver that a packet transmission has been completed or that a packet */ +/* has been received and placed in a waiting buffer. */ +/* */ +/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */ +/* which skbs are DMA mapped and immediately unmapped when the transmit */ +/* or receive has been completed, the VNIC driver is required to use */ +/* "long term mapping". This entails that large, continuous DMA mapped */ +/* buffers are allocated on driver initialization and these buffers are */ +/* then continuously reused to pass skbs to and from the VNIC server. */ +/* */ +/**************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/ethtool.h> +#include <linux/proc_fs.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/irq.h> +#include <linux/kthread.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <net/net_namespace.h> +#include <asm/hvcall.h> +#include <linux/atomic.h> +#include <asm/vio.h> +#include <asm/iommu.h> +#include <linux/uaccess.h> +#include <asm/firmware.h> +#include <linux/seq_file.h> + +#include "ibmvnic.h" + +static const char ibmvnic_driver_name[] = "ibmvnic"; +static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; + +MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); +MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVNIC_DRIVER_VERSION); + +static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; +static int ibmvnic_remove(struct vio_dev *); +static void release_sub_crqs(struct ibmvnic_adapter *); +static int ibmvnic_reset_crq(struct ibmvnic_adapter *); +static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); +static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); +static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); +static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, + union sub_crq *sub_crq); +static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); +static int enable_scrq_irq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int disable_scrq_irq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int pending_scrq(struct ibmvnic_adapter *, + struct 
ibmvnic_sub_crq_queue *); +static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, + struct ibmvnic_sub_crq_queue *); +static int ibmvnic_poll(struct napi_struct *napi, int data); +static void send_map_query(struct ibmvnic_adapter *adapter); +static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); +static void send_request_unmap(struct ibmvnic_adapter *, u8); + +struct ibmvnic_stat { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ + offsetof(struct ibmvnic_statistics, stat)) +#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off))) + +static const struct ibmvnic_stat ibmvnic_stats[] = { + {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, + {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, + {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, + {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, + {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, + {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, + {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, + {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, + {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, + {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, + {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, + {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, + {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, + {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, + {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, + {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, + {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, + {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, + {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, + {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, + {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, + {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, +}; + +static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, + unsigned long length, unsigned long *number, + unsigned long *irq) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); + *number = retbuf[0]; + *irq = retbuf[1]; + + return rc; +} + +/* net_device_ops functions */ + +static void init_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *rx_pool, int num, int index, + int buff_size, int active) +{ + netdev_dbg(adapter->netdev, + "Initializing rx_pool %d, %d buffs, %d bytes each\n", + index, num, buff_size); + rx_pool->size = num; + rx_pool->index = index; + rx_pool->buff_size = buff_size; + rx_pool->active = active; +} + +static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb, int size) +{ + struct device *dev = &adapter->vdev->dev; + + ltb->size = size; + ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, + GFP_KERNEL); + + if (!ltb->buff) { + dev_err(dev, "Couldn't alloc long term buffer\n"); + return -ENOMEM; + } + ltb->map_id = adapter->map_id; + adapter->map_id++; + send_request_map(adapter, ltb->addr, + ltb->size, ltb->map_id); + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + return 0; +} + +static void free_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb) +{ + struct device *dev = &adapter->vdev->dev; + + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + 
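+ /* The buffer was registered with the VNIC server in alloc_long_term_buff()
+  * via a REQUEST_MAP CRQ; hand the same map_id back so the server can drop
+  * its side of the mapping now that the coherent memory has been freed.
+  */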
send_request_unmap(adapter, ltb->map_id); +} + +static int alloc_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + struct device *dev = &adapter->vdev->dev; + int i; + + pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL); + if (!pool->free_map) + return -ENOMEM; + + pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff), + GFP_KERNEL); + + if (!pool->rx_buff) { + dev_err(dev, "Couldn't alloc rx buffers\n"); + kfree(pool->free_map); + return -ENOMEM; + } + + if (alloc_long_term_buff(adapter, &pool->long_term_buff, + pool->size * pool->buff_size)) { + kfree(pool->free_map); + kfree(pool->rx_buff); + return -ENOMEM; + } + + for (i = 0; i < pool->size; ++i) + pool->free_map[i] = i; + + atomic_set(&pool->available, 0); + pool->next_alloc = 0; + pool->next_free = 0; + + return 0; +} + +static void replenish_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + int count = pool->size - atomic_read(&pool->available); + struct device *dev = &adapter->vdev->dev; + int buffers_added = 0; + unsigned long lpar_rc; + union sub_crq sub_crq; + struct sk_buff *skb; + unsigned int offset; + dma_addr_t dma_addr; + unsigned char *dst; + u64 *handle_array; + int shift = 0; + int index; + int i; + + handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_rxadd_subcrqs)); + + for (i = 0; i < count; ++i) { + skb = alloc_skb(pool->buff_size, GFP_ATOMIC); + if (!skb) { + dev_err(dev, "Couldn't replenish rx buff\n"); + adapter->replenish_no_mem++; + break; + } + + index = pool->free_map[pool->next_free]; + + if (pool->rx_buff[index].skb) + dev_err(dev, "Inconsistent free_map!\n"); + + /* Copy the skb to the long term mapped DMA buffer */ + offset = index * pool->buff_size; + dst = pool->long_term_buff.buff + offset; + memset(dst, 0, pool->buff_size); + dma_addr = pool->long_term_buff.addr + offset; + pool->rx_buff[index].data = dst; + + pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + pool->rx_buff[index].dma = dma_addr; + pool->rx_buff[index].skb = skb; + pool->rx_buff[index].pool_index = pool->index; + pool->rx_buff[index].size = pool->buff_size; + + memset(&sub_crq, 0, sizeof(sub_crq)); + sub_crq.rx_add.first = IBMVNIC_CRQ_CMD; + sub_crq.rx_add.correlator = + cpu_to_be64((u64)&pool->rx_buff[index]); + sub_crq.rx_add.ioba = cpu_to_be32(dma_addr); + sub_crq.rx_add.map_id = pool->long_term_buff.map_id; + + /* The length field of the sCRQ is defined to be 24 bits so the + * buffer size needs to be left shifted by a byte before it is + * converted to big endian to prevent the last byte from being + * truncated. 
+ */ +#ifdef __LITTLE_ENDIAN__ + shift = 8; +#endif + sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift); + + lpar_rc = send_subcrq(adapter, handle_array[pool->index], + &sub_crq); + if (lpar_rc != H_SUCCESS) + goto failure; + + buffers_added++; + adapter->replenish_add_buff_success++; + pool->next_free = (pool->next_free + 1) % pool->size; + } + atomic_add(buffers_added, &pool->available); + return; + +failure: + dev_info(dev, "replenish pools failure\n"); + pool->free_map[pool->next_free] = index; + pool->rx_buff[index].skb = NULL; + if (!dma_mapping_error(dev, dma_addr)) + dma_unmap_single(dev, dma_addr, pool->buff_size, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + adapter->replenish_add_buff_failure++; + atomic_add(buffers_added, &pool->available); +} + +static void replenish_pools(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->migrated) + return; + + adapter->replenish_task_cycles++; + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) { + if (adapter->rx_pool[i].active) + replenish_rx_pool(adapter, &adapter->rx_pool[i]); + } +} + +static void free_rx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_pool *pool) +{ + int i; + + kfree(pool->free_map); + pool->free_map = NULL; + + if (!pool->rx_buff) + return; + + for (i = 0; i < pool->size; i++) { + if (pool->rx_buff[i].skb) { + dev_kfree_skb_any(pool->rx_buff[i].skb); + pool->rx_buff[i].skb = NULL; + } + } + kfree(pool->rx_buff); + pool->rx_buff = NULL; +} + +static int ibmvnic_open(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_pool *tx_pool; + union ibmvnic_crq crq; + int rxadd_subcrqs; + u64 *size_array; + int tx_subcrqs; + int i, j; + + rxadd_subcrqs = + be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + tx_subcrqs = + be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_rxadd_buff_size)); + adapter->map_id = 1; + adapter->napi = kcalloc(adapter->req_rx_queues, + sizeof(struct napi_struct), GFP_KERNEL); + if (!adapter->napi) + goto alloc_napi_failed; + for (i = 0; i < adapter->req_rx_queues; i++) { + netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, + NAPI_POLL_WEIGHT); + napi_enable(&adapter->napi[i]); + } + adapter->rx_pool = + kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); + + if (!adapter->rx_pool) + goto rx_pool_arr_alloc_failed; + send_map_query(adapter); + for (i = 0; i < rxadd_subcrqs; i++) { + init_rx_pool(adapter, &adapter->rx_pool[i], + IBMVNIC_BUFFS_PER_POOL, i, + be64_to_cpu(size_array[i]), 1); + if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) { + dev_err(dev, "Couldn't alloc rx pool\n"); + goto rx_pool_alloc_failed; + } + } + adapter->tx_pool = + kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + + if (!adapter->tx_pool) + goto tx_pool_arr_alloc_failed; + for (i = 0; i < tx_subcrqs; i++) { + tx_pool = &adapter->tx_pool[i]; + tx_pool->tx_buff = + kcalloc(adapter->max_tx_entries_per_subcrq, + sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); + if (!tx_pool->tx_buff) + goto tx_pool_alloc_failed; + + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + adapter->max_tx_entries_per_subcrq * + adapter->req_mtu)) + goto tx_ltb_alloc_failed; + + tx_pool->free_map = + kcalloc(adapter->max_tx_entries_per_subcrq, + sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) + goto tx_fm_alloc_failed; + + for (j = 
0; j < adapter->max_tx_entries_per_subcrq; j++) + tx_pool->free_map[j] = j; + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + } + adapter->bounce_buffer_size = + (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1; + adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size, + GFP_KERNEL); + if (!adapter->bounce_buffer) + goto bounce_alloc_failed; + + adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer, + adapter->bounce_buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dev_err(dev, "Couldn't map tx bounce buffer\n"); + goto bounce_map_failed; + } + replenish_pools(adapter); + + /* We're ready to receive frames, enable the sub-crq interrupts and + * set the logical link state to up + */ + for (i = 0; i < adapter->req_rx_queues; i++) + enable_scrq_irq(adapter, adapter->rx_scrq[i]); + + for (i = 0; i < adapter->req_tx_queues; i++) + enable_scrq_irq(adapter, adapter->tx_scrq[i]); + + memset(&crq, 0, sizeof(crq)); + crq.logical_link_state.first = IBMVNIC_CRQ_CMD; + crq.logical_link_state.cmd = LOGICAL_LINK_STATE; + crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP; + ibmvnic_send_crq(adapter, &crq); + + netif_start_queue(netdev); + return 0; + +bounce_map_failed: + kfree(adapter->bounce_buffer); +bounce_alloc_failed: + i = tx_subcrqs - 1; + kfree(adapter->tx_pool[i].free_map); +tx_fm_alloc_failed: + free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff); +tx_ltb_alloc_failed: + kfree(adapter->tx_pool[i].tx_buff); +tx_pool_alloc_failed: + for (j = 0; j < i; j++) { + kfree(adapter->tx_pool[j].tx_buff); + free_long_term_buff(adapter, + &adapter->tx_pool[j].long_term_buff); + kfree(adapter->tx_pool[j].free_map); + } + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; +tx_pool_arr_alloc_failed: + i = rxadd_subcrqs; +rx_pool_alloc_failed: + for (j = 0; j < i; j++) { + free_rx_pool(adapter, &adapter->rx_pool[j]); + free_long_term_buff(adapter, + &adapter->rx_pool[j].long_term_buff); + } + kfree(adapter->rx_pool); + adapter->rx_pool = NULL; +rx_pool_arr_alloc_failed: + for (i = 0; i < adapter->req_rx_queues; i++) + napi_enable(&adapter->napi[i]); +alloc_napi_failed: + return -ENOMEM; +} + +static int ibmvnic_close(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + int i; + + adapter->closing = true; + + for (i = 0; i < adapter->req_rx_queues; i++) + napi_disable(&adapter->napi[i]); + + netif_stop_queue(netdev); + + if (adapter->bounce_buffer) { + if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dma_unmap_single(&adapter->vdev->dev, + adapter->bounce_buffer_dma, + adapter->bounce_buffer_size, + DMA_BIDIRECTIONAL); + adapter->bounce_buffer_dma = DMA_ERROR_CODE; + } + kfree(adapter->bounce_buffer); + adapter->bounce_buffer = NULL; + } + + memset(&crq, 0, sizeof(crq)); + crq.logical_link_state.first = IBMVNIC_CRQ_CMD; + crq.logical_link_state.cmd = LOGICAL_LINK_STATE; + crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN; + ibmvnic_send_crq(adapter, &crq); + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + i++) { + kfree(adapter->tx_pool[i].tx_buff); + free_long_term_buff(adapter, + &adapter->tx_pool[i].long_term_buff); + kfree(adapter->tx_pool[i].free_map); + } + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) { + free_rx_pool(adapter, &adapter->rx_pool[i]); + 
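+ /* free_rx_pool() only releases the skbs, rx_buff array and free_map; the
+  * pool's backing long-term DMA buffer is unmapped and freed separately
+  * below.
+  */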
free_long_term_buff(adapter, + &adapter->rx_pool[i].long_term_buff); + } + kfree(adapter->rx_pool); + adapter->rx_pool = NULL; + + adapter->closing = false; + + return 0; +} + +static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int queue_num = skb_get_queue_mapping(skb); + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_buff *tx_buff = NULL; + struct ibmvnic_tx_pool *tx_pool; + unsigned int tx_send_failed = 0; + unsigned int tx_map_failed = 0; + unsigned int tx_dropped = 0; + unsigned int tx_packets = 0; + unsigned int tx_bytes = 0; + dma_addr_t data_dma_addr; + struct netdev_queue *txq; + bool used_bounce = false; + unsigned long lpar_rc; + union sub_crq tx_crq; + unsigned int offset; + unsigned char *dst; + u64 *handle_array; + int index = 0; + int ret = 0; + + tx_pool = &adapter->tx_pool[queue_num]; + txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); + handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf-> + off_txsubm_subcrqs)); + if (adapter->migrated) { + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + + index = tx_pool->free_map[tx_pool->consumer_index]; + offset = index * adapter->req_mtu; + dst = tx_pool->long_term_buff.buff + offset; + memset(dst, 0, adapter->req_mtu); + skb_copy_from_linear_data(skb, dst, skb->len); + data_dma_addr = tx_pool->long_term_buff.addr + offset; + + tx_pool->consumer_index = + (tx_pool->consumer_index + 1) % + adapter->max_tx_entries_per_subcrq; + + tx_buff = &tx_pool->tx_buff[index]; + tx_buff->skb = skb; + tx_buff->data_dma[0] = data_dma_addr; + tx_buff->data_len[0] = skb->len; + tx_buff->index = index; + tx_buff->pool_index = queue_num; + tx_buff->last_frag = true; + tx_buff->used_bounce = used_bounce; + + memset(&tx_crq, 0, sizeof(tx_crq)); + tx_crq.v1.first = IBMVNIC_CRQ_CMD; + tx_crq.v1.type = IBMVNIC_TX_DESC; + tx_crq.v1.n_crq_elem = 1; + tx_crq.v1.n_sge = 1; + tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; + tx_crq.v1.correlator = cpu_to_be32(index); + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.sge_len = cpu_to_be32(skb->len); + tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); + + if (adapter->vlan_header_insertion) { + tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; + tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); + } + + if (skb->protocol == htons(ETH_P_IP)) { + if (ip_hdr(skb)->version == 4) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; + else if (ip_hdr(skb)->version == 6) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; + + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; + else if (ip_hdr(skb)->protocol != IPPROTO_TCP) + tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; + + lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); + + if (lpar_rc != H_SUCCESS) { + dev_err(dev, "tx failed with code %ld\n", lpar_rc); + + if (tx_pool->consumer_index == 0) + tx_pool->consumer_index = + adapter->max_tx_entries_per_subcrq - 1; + else + tx_pool->consumer_index--; + + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + tx_packets++; + tx_bytes += skb->len; + txq->trans_start = jiffies; + ret = NETDEV_TX_OK; + +out: + netdev->stats.tx_dropped += tx_dropped; + netdev->stats.tx_bytes += tx_bytes; + netdev->stats.tx_packets += tx_packets; + adapter->tx_send_failed += tx_send_failed; + adapter->tx_map_failed += 
tx_map_failed; + + return ret; +} + +static void ibmvnic_set_multi(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_capability.first = IBMVNIC_CRQ_CMD; + crq.request_capability.cmd = REQUEST_CAPABILITY; + + if (netdev->flags & IFF_PROMISC) { + if (!adapter->promisc_supported) + return; + } else { + if (netdev->flags & IFF_ALLMULTI) { + /* Accept all multicast */ + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; + ibmvnic_send_crq(adapter, &crq); + } else if (netdev_mc_empty(netdev)) { + /* Reject all multicast */ + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; + ibmvnic_send_crq(adapter, &crq); + } else { + /* Accept one or more multicast(s) */ + netdev_for_each_mc_addr(ha, netdev) { + memset(&crq, 0, sizeof(crq)); + crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; + crq.multicast_ctrl.cmd = MULTICAST_CTRL; + crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; + ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], + ha->addr); + ibmvnic_send_crq(adapter, &crq); + } + } + } +} + +static int ibmvnic_set_mac(struct net_device *netdev, void *p) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + union ibmvnic_crq crq; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memset(&crq, 0, sizeof(crq)); + crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; + crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; + ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); + ibmvnic_send_crq(adapter, &crq); + /* netdev->dev_addr is changed in handle_change_mac_rsp function */ + return 0; +} + +static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu) + return -EINVAL; + + netdev->mtu = new_mtu; + return 0; +} + +static void ibmvnic_tx_timeout(struct net_device *dev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + int rc; + + /* Adapter timed out, resetting it */ + release_sub_crqs(adapter); + rc = ibmvnic_reset_crq(adapter); + if (rc) + dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n"); + else + ibmvnic_send_crq_init(adapter); +} + +static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_rx_buff *rx_buff) +{ + struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; + + rx_buff->skb = NULL; + + pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); + pool->next_alloc = (pool->next_alloc + 1) % pool->size; + + atomic_dec(&pool->available); +} + +static int ibmvnic_poll(struct napi_struct *napi, int budget) +{ + struct net_device *netdev = napi->dev; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int scrq_num = (int)(napi - adapter->napi); + int frames_processed = 0; +restart_poll: + while (frames_processed < budget) { + struct sk_buff *skb; + struct ibmvnic_rx_buff *rx_buff; + union sub_crq *next; + u32 length; + u16 offset; + u8 flags = 0; + + if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) + break; + next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); + rx_buff = + (struct ibmvnic_rx_buff *)be64_to_cpu(next-> + rx_comp.correlator); + /* do error 
checking */ + if (next->rx_comp.rc) { + netdev_err(netdev, "rx error %x\n", next->rx_comp.rc); + /* free the entry */ + next->rx_comp.first = 0; + remove_buff_from_pool(adapter, rx_buff); + break; + } + + length = be32_to_cpu(next->rx_comp.len); + offset = be16_to_cpu(next->rx_comp.off_frame_data); + flags = next->rx_comp.flags; + skb = rx_buff->skb; + skb_copy_to_linear_data(skb, rx_buff->data + offset, + length); + skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci); + /* free the entry */ + next->rx_comp.first = 0; + remove_buff_from_pool(adapter, rx_buff); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); + + if (flags & IBMVNIC_IP_CHKSUM_GOOD && + flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + length = skb->len; + napi_gro_receive(napi, skb); /* send it up */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += length; + frames_processed++; + } + replenish_pools(adapter); + + if (frames_processed < budget) { + enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); + napi_complete(napi); + if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && + napi_reschedule(napi)) { + disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); + goto restart_poll; + } + } + return frames_processed; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ibmvnic_netpoll_controller(struct net_device *dev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + int i; + + replenish_pools(netdev_priv(dev)); + for (i = 0; i < adapter->req_rx_queues; i++) + ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); +} +#endif + +static const struct net_device_ops ibmvnic_netdev_ops = { + .ndo_open = ibmvnic_open, + .ndo_stop = ibmvnic_close, + .ndo_start_xmit = ibmvnic_xmit, + .ndo_set_rx_mode = ibmvnic_set_multi, + .ndo_set_mac_address = ibmvnic_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ibmvnic_change_mtu, + .ndo_tx_timeout = ibmvnic_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ibmvnic_netpoll_controller, +#endif +}; + +/* ethtool functions */ + +static int ibmvnic_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 1; + return 0; +} + +static void ibmvnic_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); + strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); +} + +static u32 ibmvnic_get_msglevel(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static u32 ibmvnic_get_link(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + /* Don't need to send a query because we request a logical link up at + * init and then we wait for link state indications + */ + return adapter->logical_link_state; +} + +static void ibmvnic_get_ringparam(struct net_device *netdev, + struct 
ethtool_ringparam *ring) +{ + ring->rx_max_pending = 0; + ring->tx_max_pending = 0; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = 0; + ring->tx_pending = 0; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); +} + +static int ibmvnic_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ibmvnic_stats); + default: + return -EOPNOTSUPP; + } +} + +static void ibmvnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct ibmvnic_adapter *adapter = netdev_priv(dev); + union ibmvnic_crq crq; + int i; + + memset(&crq, 0, sizeof(crq)); + crq.request_statistics.first = IBMVNIC_CRQ_CMD; + crq.request_statistics.cmd = REQUEST_STATISTICS; + crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); + crq.request_statistics.len = + cpu_to_be32(sizeof(struct ibmvnic_statistics)); + ibmvnic_send_crq(adapter, &crq); + + /* Wait for data to be written */ + init_completion(&adapter->stats_done); + wait_for_completion(&adapter->stats_done); + + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) + data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); +} + +static const struct ethtool_ops ibmvnic_ethtool_ops = { + .get_settings = ibmvnic_get_settings, + .get_drvinfo = ibmvnic_get_drvinfo, + .get_msglevel = ibmvnic_get_msglevel, + .set_msglevel = ibmvnic_set_msglevel, + .get_link = ibmvnic_get_link, + .get_ringparam = ibmvnic_get_ringparam, + .get_strings = ibmvnic_get_strings, + .get_sset_count = ibmvnic_get_sset_count, + .get_ethtool_stats = ibmvnic_get_ethtool_stats, +}; + +/* Routines for managing CRQs/sCRQs */ + +static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + long rc; + + netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); + + /* Close the sub-crqs */ + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)scrq->msgs, 2); + kfree(scrq); +} + +static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter + *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_sub_crq_queue *scrq; + int rc; + + scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC); + if (!scrq) + return NULL; + + scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); + memset(scrq->msgs, 0, 4 * PAGE_SIZE); + if (!scrq->msgs) { + dev_warn(dev, "Couldn't allocate crq queue messages page\n"); + goto zero_page_failed; + } + + scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, scrq->msg_token)) { + dev_warn(dev, "Couldn't map crq queue messages page\n"); + goto map_failed; + } + + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, + 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); + + if (rc == H_RESOURCE) + rc = ibmvnic_reset_crq(adapter); + + if (rc == H_CLOSED) { + dev_warn(dev, "Partner adapter not ready, waiting.\n"); + } else if (rc) { + dev_warn(dev, "Error %d registering sub-crq\n", rc); + goto 
reg_failed; + } + + scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); + if (scrq->irq == NO_IRQ) { + dev_err(dev, "Error mapping irq\n"); + goto map_irq_failed; + } + + scrq->adapter = adapter; + scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); + scrq->cur = 0; + scrq->rx_skb_top = NULL; + spin_lock_init(&scrq->lock); + + netdev_dbg(adapter->netdev, + "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", + scrq->crq_num, scrq->hw_irq, scrq->irq); + + return scrq; + +map_irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_failed: + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, + DMA_BIDIRECTIONAL); +map_failed: + free_pages((unsigned long)scrq->msgs, 2); +zero_page_failed: + kfree(scrq); + + return NULL; +} + +static void release_sub_crqs(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->tx_scrq) { + for (i = 0; i < adapter->req_tx_queues; i++) + if (adapter->tx_scrq[i]) { + free_irq(adapter->tx_scrq[i]->irq, + adapter->tx_scrq[i]); + release_sub_crq_queue(adapter, + adapter->tx_scrq[i]); + } + adapter->tx_scrq = NULL; + } + + if (adapter->rx_scrq) { + for (i = 0; i < adapter->req_rx_queues; i++) + if (adapter->rx_scrq[i]) { + free_irq(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); + release_sub_crq_queue(adapter, + adapter->rx_scrq[i]); + } + adapter->rx_scrq = NULL; + } + + adapter->requested_caps = 0; +} + +static int disable_scrq_irq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long rc; + + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); + if (rc) + dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", + scrq->hw_irq, rc); + return rc; +} + +static int enable_scrq_irq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long rc; + + if (scrq->hw_irq > 0x100000000ULL) { + dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); + return 1; + } + + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); + if (rc) + dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", + scrq->hw_irq, rc); + return rc; +} + +static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_buff *txbuff; + union sub_crq *next; + int index; + int i, j; + +restart_loop: + while (pending_scrq(adapter, scrq)) { + unsigned int pool = scrq->pool_index; + + next = ibmvnic_next_scrq(adapter, scrq); + for (i = 0; i < next->tx_comp.num_comps; i++) { + if (next->tx_comp.rcs[i]) { + dev_err(dev, "tx error %x\n", + next->tx_comp.rcs[i]); + continue; + } + index = be32_to_cpu(next->tx_comp.correlators[i]); + txbuff = &adapter->tx_pool[pool].tx_buff[index]; + + for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { + if (!txbuff->data_dma[j]) + continue; + + txbuff->data_dma[j] = 0; + txbuff->used_bounce = false; + } + + if (txbuff->last_frag) + dev_kfree_skb_any(txbuff->skb); + + adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. 
+ producer_index] = index; + adapter->tx_pool[pool].producer_index = + (adapter->tx_pool[pool].producer_index + 1) % + adapter->max_tx_entries_per_subcrq; + } + /* remove tx_comp scrq*/ + next->tx_comp.first = 0; + } + + enable_scrq_irq(adapter, scrq); + + if (pending_scrq(adapter, scrq)) { + disable_scrq_irq(adapter, scrq); + goto restart_loop; + } + + return 0; +} + +static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) +{ + struct ibmvnic_sub_crq_queue *scrq = instance; + struct ibmvnic_adapter *adapter = scrq->adapter; + + disable_scrq_irq(adapter, scrq); + ibmvnic_complete_tx(adapter, scrq); + + return IRQ_HANDLED; +} + +static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) +{ + struct ibmvnic_sub_crq_queue *scrq = instance; + struct ibmvnic_adapter *adapter = scrq->adapter; + + if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { + disable_scrq_irq(adapter, scrq); + __napi_schedule(&adapter->napi[scrq->scrq_num]); + } + + return IRQ_HANDLED; +} + +static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_sub_crq_queue **allqueues; + int registered_queues = 0; + union ibmvnic_crq crq; + int total_queues; + int more = 0; + int i, j; + int rc; + + if (!retry) { + /* Sub-CRQ entries are 32 byte long */ + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + + if (adapter->min_tx_entries_per_subcrq > entries_page || + adapter->min_rx_add_entries_per_subcrq > entries_page) { + dev_err(dev, "Fatal, invalid entries per sub-crq\n"); + goto allqueues_failed; + } + + /* Get the minimum between the queried max and the entries + * that fit in our PAGE_SIZE + */ + adapter->req_tx_entries_per_subcrq = + adapter->max_tx_entries_per_subcrq > entries_page ? + entries_page : adapter->max_tx_entries_per_subcrq; + adapter->req_rx_add_entries_per_subcrq = + adapter->max_rx_add_entries_per_subcrq > entries_page ? 
+ entries_page : adapter->max_rx_add_entries_per_subcrq; + + /* Choosing the maximum number of queues supported by firmware*/ + adapter->req_tx_queues = adapter->min_tx_queues; + adapter->req_rx_queues = adapter->min_rx_queues; + adapter->req_rx_add_queues = adapter->min_rx_add_queues; + + adapter->req_mtu = adapter->max_mtu; + } + + total_queues = adapter->req_tx_queues + adapter->req_rx_queues; + + allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC); + if (!allqueues) + goto allqueues_failed; + + for (i = 0; i < total_queues; i++) { + allqueues[i] = init_sub_crq_queue(adapter); + if (!allqueues[i]) { + dev_warn(dev, "Couldn't allocate all sub-crqs\n"); + break; + } + registered_queues++; + } + + /* Make sure we were able to register the minimum number of queues */ + if (registered_queues < + adapter->min_tx_queues + adapter->min_rx_queues) { + dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); + goto tx_failed; + } + + /* Distribute the failed allocated queues*/ + for (i = 0; i < total_queues - registered_queues + more ; i++) { + netdev_dbg(adapter->netdev, "Reducing number of queues\n"); + switch (i % 3) { + case 0: + if (adapter->req_rx_queues > adapter->min_rx_queues) + adapter->req_rx_queues--; + else + more++; + break; + case 1: + if (adapter->req_tx_queues > adapter->min_tx_queues) + adapter->req_tx_queues--; + else + more++; + break; + } + } + + adapter->tx_scrq = kcalloc(adapter->req_tx_queues, + sizeof(*adapter->tx_scrq), GFP_ATOMIC); + if (!adapter->tx_scrq) + goto tx_failed; + + for (i = 0; i < adapter->req_tx_queues; i++) { + adapter->tx_scrq[i] = allqueues[i]; + adapter->tx_scrq[i]->pool_index = i; + rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx, + 0, "ibmvnic_tx", adapter->tx_scrq[i]); + if (rc) { + dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", + adapter->tx_scrq[i]->irq, rc); + goto req_tx_irq_failed; + } + } + + adapter->rx_scrq = kcalloc(adapter->req_rx_queues, + sizeof(*adapter->rx_scrq), GFP_ATOMIC); + if (!adapter->rx_scrq) + goto rx_failed; + + for (i = 0; i < adapter->req_rx_queues; i++) { + adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; + adapter->rx_scrq[i]->scrq_num = i; + rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx, + 0, "ibmvnic_rx", adapter->rx_scrq[i]); + if (rc) { + dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", + adapter->rx_scrq[i]->irq, rc); + goto req_rx_irq_failed; + } + } + + memset(&crq, 0, sizeof(crq)); + crq.request_capability.first = IBMVNIC_CRQ_CMD; + crq.request_capability.cmd = REQUEST_CAPABILITY; + + crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); + crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be32(adapter->req_tx_entries_per_subcrq); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = + cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); + crq.request_capability.number = + cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); + ibmvnic_send_crq(adapter, &crq); + + crq.request_capability.capability = cpu_to_be16(REQ_MTU); + crq.request_capability.number = cpu_to_be32(adapter->req_mtu); + ibmvnic_send_crq(adapter, &crq); + + if (adapter->netdev->flags & IFF_PROMISC) { + if (adapter->promisc_supported) { + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be32(1); + ibmvnic_send_crq(adapter, &crq); + } + } else { + crq.request_capability.capability = + cpu_to_be16(PROMISC_REQUESTED); + crq.request_capability.number = cpu_to_be32(0); + ibmvnic_send_crq(adapter, &crq); + } + + kfree(allqueues); + + return; + +req_rx_irq_failed: + for (j = 0; j < i; j++) + free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); + i = adapter->req_tx_queues; +req_tx_irq_failed: + for (j = 0; j < i; j++) + free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); + kfree(adapter->rx_scrq); + adapter->rx_scrq = NULL; +rx_failed: + kfree(adapter->tx_scrq); + adapter->tx_scrq = NULL; +tx_failed: + for (i = 0; i < registered_queues; i++) + release_sub_crq_queue(adapter, allqueues[i]); + kfree(allqueues); +allqueues_failed: + ibmvnic_remove(adapter->vdev); +} + +static int pending_scrq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + union sub_crq *entry = &scrq->msgs[scrq->cur]; + + if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing) + return 1; + else + return 0; +} + +static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + union sub_crq *entry; + unsigned long flags; + + spin_lock_irqsave(&scrq->lock, flags); + entry = &scrq->msgs[scrq->cur]; + if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { + if (++scrq->cur == scrq->size) + scrq->cur = 0; + } else { + entry = NULL; + } + spin_unlock_irqrestore(&scrq->lock, flags); + + return entry; +} + +static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *queue = &adapter->crq; + union ibmvnic_crq *crq; + + crq = &queue->msgs[queue->cur]; + if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { + if (++queue->cur == queue->size) + queue->cur = 0; + } else { + crq = NULL; + } + + return crq; +} + +static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, + union sub_crq *sub_crq) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + u64 
*u64_crq = (u64 *)sub_crq; + int rc; + + netdev_dbg(adapter->netdev, + "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", + (unsigned long int)cpu_to_be64(remote_handle), + (unsigned long int)cpu_to_be64(u64_crq[0]), + (unsigned long int)cpu_to_be64(u64_crq[1]), + (unsigned long int)cpu_to_be64(u64_crq[2]), + (unsigned long int)cpu_to_be64(u64_crq[3])); + + /* Make sure the hypervisor sees the complete request */ + mb(); + + rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, + cpu_to_be64(remote_handle), + cpu_to_be64(u64_crq[0]), + cpu_to_be64(u64_crq[1]), + cpu_to_be64(u64_crq[2]), + cpu_to_be64(u64_crq[3])); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_err(dev, "Send error (rc=%d)\n", rc); + } + + return rc; +} + +static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, + union ibmvnic_crq *crq) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + u64 *u64_crq = (u64 *)crq; + int rc; + + netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", + (unsigned long int)cpu_to_be64(u64_crq[0]), + (unsigned long int)cpu_to_be64(u64_crq[1])); + + /* Make sure the hypervisor sees the complete request */ + mb(); + + rc = plpar_hcall_norets(H_SEND_CRQ, ua, + cpu_to_be64(u64_crq[0]), + cpu_to_be64(u64_crq[1])); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_warn(dev, "Send error (rc=%d)\n", rc); + } + + return rc; +} + +static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.generic.first = IBMVNIC_CRQ_INIT_CMD; + crq.generic.cmd = IBMVNIC_CRQ_INIT; + netdev_dbg(adapter->netdev, "Sending CRQ init\n"); + + return ibmvnic_send_crq(adapter, &crq); +} + +static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.generic.first = IBMVNIC_CRQ_INIT_CMD; + crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; + netdev_dbg(adapter->netdev, "Sending CRQ init complete\n"); + + return ibmvnic_send_crq(adapter, &crq); +} + +static int send_version_xchg(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.version_exchange.first = IBMVNIC_CRQ_CMD; + crq.version_exchange.cmd = VERSION_EXCHANGE; + crq.version_exchange.version = cpu_to_be16(ibmvnic_version); + + return ibmvnic_send_crq(adapter, &crq); +} + +static void send_login(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_login_rsp_buffer *login_rsp_buffer; + struct ibmvnic_login_buffer *login_buffer; + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + dma_addr_t rsp_buffer_token; + dma_addr_t buffer_token; + size_t rsp_buffer_size; + union ibmvnic_crq crq; + unsigned long flags; + size_t buffer_size; + __be64 *tx_list_p; + __be64 *rx_list_p; + int i; + + buffer_size = + sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues); + + login_buffer = kmalloc(buffer_size, GFP_ATOMIC); + if (!login_buffer) + goto buf_alloc_failed; + + buffer_token = dma_map_single(dev, login_buffer, buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, buffer_token)) { + dev_err(dev, "Couldn't map login buffer\n"); + goto buf_map_failed; + } + + rsp_buffer_size = + sizeof(struct ibmvnic_login_rsp_buffer) + + sizeof(u64) * (adapter->req_tx_queues + + adapter->req_rx_queues * + adapter->req_rx_add_queues + adapter-> + req_rx_add_queues) + + sizeof(u8) * 
(IBMVNIC_TX_DESC_VERSIONS); + + login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); + if (!login_rsp_buffer) + goto buf_rsp_alloc_failed; + + rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, + rsp_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, rsp_buffer_token)) { + dev_err(dev, "Couldn't map login rsp buffer\n"); + goto buf_rsp_map_failed; + } + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dev_err(dev, "Couldn't allocate inflight_cmd\n"); + goto inflight_alloc_failed; + } + adapter->login_buf = login_buffer; + adapter->login_buf_token = buffer_token; + adapter->login_buf_sz = buffer_size; + adapter->login_rsp_buf = login_rsp_buffer; + adapter->login_rsp_buf_token = rsp_buffer_token; + adapter->login_rsp_buf_sz = rsp_buffer_size; + + login_buffer->len = cpu_to_be32(buffer_size); + login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); + login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); + login_buffer->off_txcomp_subcrqs = + cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); + login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); + login_buffer->off_rxcomp_subcrqs = + cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * adapter->req_tx_queues); + login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); + login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); + + tx_list_p = (__be64 *)((char *)login_buffer + + sizeof(struct ibmvnic_login_buffer)); + rx_list_p = (__be64 *)((char *)login_buffer + + sizeof(struct ibmvnic_login_buffer) + + sizeof(u64) * adapter->req_tx_queues); + + for (i = 0; i < adapter->req_tx_queues; i++) { + if (adapter->tx_scrq[i]) { + tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> + crq_num); + } + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + if (adapter->rx_scrq[i]) { + rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> + crq_num); + } + } + + netdev_dbg(adapter->netdev, "Login Buffer:\n"); + for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(adapter->login_buf))[i]); + } + + memset(&crq, 0, sizeof(crq)); + crq.login.first = IBMVNIC_CRQ_CMD; + crq.login.cmd = LOGIN; + crq.login.ioba = cpu_to_be32(buffer_token); + crq.login.len = cpu_to_be32(buffer_size); + + memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &crq); + + return; + +inflight_alloc_failed: + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, + DMA_FROM_DEVICE); +buf_rsp_map_failed: + kfree(login_rsp_buffer); +buf_rsp_alloc_failed: + dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); +buf_map_failed: + kfree(login_buffer); +buf_alloc_failed: + return; +} + +static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, + u32 len, u8 map_id) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_map.first = IBMVNIC_CRQ_CMD; + crq.request_map.cmd = REQUEST_MAP; + crq.request_map.map_id = map_id; + crq.request_map.ioba = cpu_to_be32(addr); + crq.request_map.len = cpu_to_be32(len); + ibmvnic_send_crq(adapter, &crq); +} + +static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_unmap.first = IBMVNIC_CRQ_CMD; + crq.request_unmap.cmd = REQUEST_UNMAP; + 
crq.request_unmap.map_id = map_id; + ibmvnic_send_crq(adapter, &crq); +} + +static void send_map_query(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.query_map.first = IBMVNIC_CRQ_CMD; + crq.query_map.cmd = QUERY_MAP; + ibmvnic_send_crq(adapter, &crq); +} + +/* Send a series of CRQs requesting various capabilities of the VNIC server */ +static void send_cap_queries(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + + atomic_set(&adapter->running_cap_queries, 0); + memset(&crq, 0, sizeof(crq)); + crq.query_capability.first = IBMVNIC_CRQ_CMD; + crq.query_capability.cmd = QUERY_CAPABILITY; + + crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MIN_MTU); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_MTU); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + 
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = + cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); + + crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); + atomic_inc(&adapter->running_cap_queries); + ibmvnic_send_crq(adapter, &crq); +} + +static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; + union ibmvnic_crq crq; + int i; + + dma_unmap_single(dev, adapter->ip_offload_tok, + sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); + + netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); + for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(buf))[i]); + + netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); + netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); + netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", + buf->tcp_ipv4_chksum); + netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", + buf->tcp_ipv6_chksum); + netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", + buf->udp_ipv4_chksum); + netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", + buf->udp_ipv6_chksum); + netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", + buf->large_tx_ipv4); + netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", + buf->large_tx_ipv6); + netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", + buf->large_rx_ipv4); + netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", + buf->large_rx_ipv6); + netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", + buf->max_ipv4_header_size); + netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", + buf->max_ipv6_header_size); + netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", + buf->max_tcp_header_size); + netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", + buf->max_udp_header_size); + netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", + buf->max_large_tx_size); + netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", + buf->max_large_rx_size); + netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", + buf->ipv6_extension_header); + netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", + buf->tcp_pseudosum_req); + netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", + buf->num_ipv6_ext_headers); + netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", + buf->off_ipv6_ext_headers); + + adapter->ip_offload_ctrl_tok = + dma_map_single(dev, &adapter->ip_offload_ctrl, + sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); + + if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { + dev_err(dev, "Couldn't map ip offload control buffer\n"); + return; + } + + adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); + adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; + adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; + adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; + adapter->ip_offload_ctrl.udp_ipv6_chksum = 
buf->udp_ipv6_chksum; + + /* large_tx/rx disabled for now, additional features needed */ + adapter->ip_offload_ctrl.large_tx_ipv4 = 0; + adapter->ip_offload_ctrl.large_tx_ipv6 = 0; + adapter->ip_offload_ctrl.large_rx_ipv4 = 0; + adapter->ip_offload_ctrl.large_rx_ipv6 = 0; + + adapter->netdev->features = NETIF_F_GSO; + + if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) + adapter->netdev->features |= NETIF_F_IP_CSUM; + + if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) + adapter->netdev->features |= NETIF_F_IPV6_CSUM; + + memset(&crq, 0, sizeof(crq)); + crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; + crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; + crq.control_ip_offload.len = + cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); + crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); + ibmvnic_send_crq(adapter, &crq); +} + +static void handle_error_info_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + unsigned long flags; + bool found = false; + int i; + + if (!crq->request_error_rsp.rc.code) { + dev_info(dev, "Request Error Rsp returned with rc=%x\n", + crq->request_error_rsp.rc.code); + return; + } + + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_for_each_entry(error_buff, &adapter->errors, list) + if (error_buff->error_id == crq->request_error_rsp.error_id) { + found = true; + list_del(&error_buff->list); + break; + } + spin_unlock_irqrestore(&adapter->error_list_lock, flags); + + if (!found) { + dev_err(dev, "Couldn't find error id %x\n", + crq->request_error_rsp.error_id); + return; + } + + dev_err(dev, "Detailed info for error id %x:", + crq->request_error_rsp.error_id); + + for (i = 0; i < error_buff->len; i++) { + pr_cont("%02x", (int)error_buff->buff[i]); + if (i % 8 == 7) + pr_cont(" "); + } + pr_cont("\n"); + + dma_unmap_single(dev, error_buff->dma, error_buff->len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + kfree(error_buff); +} + +static void handle_dump_size_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int len = be32_to_cpu(crq->request_dump_size_rsp.len); + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq newcrq; + unsigned long flags; + + /* allocate and map buffer */ + adapter->dump_data = kmalloc(len, GFP_KERNEL); + if (!adapter->dump_data) { + complete(&adapter->fw_done); + return; + } + + adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, adapter->dump_data_token)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map dump data\n"); + kfree(adapter->dump_data); + complete(&adapter->fw_done); + return; + } + + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dma_unmap_single(dev, adapter->dump_data_token, len, + DMA_FROM_DEVICE); + kfree(adapter->dump_data); + complete(&adapter->fw_done); + return; + } + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.request_dump.first = IBMVNIC_CRQ_CMD; + newcrq.request_dump.cmd = REQUEST_DUMP; + newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token); + newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size); + + memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &newcrq); 
+} + +static void handle_error_indication(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz); + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + union ibmvnic_crq new_crq; + unsigned long flags; + + dev_err(dev, "Firmware reports %serror id %x, cause %d\n", + crq->error_indication. + flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", + crq->error_indication.error_id, + crq->error_indication.error_cause); + + error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); + if (!error_buff) + return; + + error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); + if (!error_buff->buff) { + kfree(error_buff); + return; + } + + error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, error_buff->dma)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map error buffer\n"); + kfree(error_buff->buff); + kfree(error_buff); + return; + } + + inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); + if (!inflight_cmd) { + dma_unmap_single(dev, error_buff->dma, detail_len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + kfree(error_buff); + return; + } + + error_buff->len = detail_len; + error_buff->error_id = crq->error_indication.error_id; + + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_add_tail(&error_buff->list, &adapter->errors); + spin_unlock_irqrestore(&adapter->error_list_lock, flags); + + memset(&new_crq, 0, sizeof(new_crq)); + new_crq.request_error_info.first = IBMVNIC_CRQ_CMD; + new_crq.request_error_info.cmd = REQUEST_ERROR_INFO; + new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); + new_crq.request_error_info.len = cpu_to_be32(detail_len); + new_crq.request_error_info.error_id = crq->error_indication.error_id; + + memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_add_tail(&inflight_cmd->list, &adapter->inflight); + spin_unlock_irqrestore(&adapter->inflight_lock, flags); + + ibmvnic_send_crq(adapter, &new_crq); +} + +static void handle_change_mac_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->change_mac_addr_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); + return; + } + memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], + ETH_ALEN); +} + +static void handle_request_cap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + u64 *req_value; + char *name; + + switch (be16_to_cpu(crq->request_capability_rsp.capability)) { + case REQ_TX_QUEUES: + req_value = &adapter->req_tx_queues; + name = "tx"; + break; + case REQ_RX_QUEUES: + req_value = &adapter->req_rx_queues; + name = "rx"; + break; + case REQ_RX_ADD_QUEUES: + req_value = &adapter->req_rx_add_queues; + name = "rx_add"; + break; + case REQ_TX_ENTRIES_PER_SUBCRQ: + req_value = &adapter->req_tx_entries_per_subcrq; + name = "tx_entries_per_subcrq"; + break; + case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: + req_value = &adapter->req_rx_add_entries_per_subcrq; + name = "rx_add_entries_per_subcrq"; + break; + case REQ_MTU: + req_value = &adapter->req_mtu; + name = "mtu"; + break; + case PROMISC_REQUESTED: + req_value = &adapter->promisc; + name = "promisc"; + break; + default: + dev_err(dev, "Got invalid 
cap request rsp %d\n", + crq->request_capability.capability); + return; + } + + switch (crq->request_capability_rsp.rc.code) { + case SUCCESS: + break; + case PARTIALSUCCESS: + dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", + *req_value, + (long int)be32_to_cpu(crq->request_capability_rsp. + number), name); + release_sub_crqs(adapter); + *req_value = be32_to_cpu(crq->request_capability_rsp.number); + complete(&adapter->init_done); + return; + default: + dev_err(dev, "Error %d in request cap rsp\n", + crq->request_capability_rsp.rc.code); + return; + } + + /* Done receiving requested capabilities, query IP offload support */ + if (++adapter->requested_caps == 7) { + union ibmvnic_crq newcrq; + int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); + struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = + &adapter->ip_offload_buf; + + adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, + buf_sz, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, adapter->ip_offload_tok)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't map offload buffer\n"); + return; + } + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; + newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; + newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); + newcrq.query_ip_offload.ioba = + cpu_to_be32(adapter->ip_offload_tok); + + ibmvnic_send_crq(adapter, &newcrq); + } +} + +static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; + struct ibmvnic_login_buffer *login = adapter->login_buf; + union ibmvnic_crq crq; + int i; + + dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); + + netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); + for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { + netdev_dbg(adapter->netdev, "%016lx\n", + ((unsigned long int *)(adapter->login_rsp_buf))[i]); + } + + /* Sanity checks */ + if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || + (be32_to_cpu(login->num_rxcomp_subcrqs) * + adapter->req_rx_add_queues != + be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { + dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); + ibmvnic_remove(adapter->vdev); + return -EIO; + } + complete(&adapter->init_done); + + memset(&crq, 0, sizeof(crq)); + crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD; + crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM; + ibmvnic_send_crq(adapter, &crq); + + return 0; +} + +static void handle_request_map_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + u8 map_id = crq->request_map_rsp.map_id; + int tx_subcrqs; + int rx_subcrqs; + long rc; + int i; + + tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + + rc = crq->request_map_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc); + adapter->map_id--; + /* need to find and zero tx/rx_pool map_id */ + for (i = 0; i < tx_subcrqs; i++) { + if (adapter->tx_pool[i].long_term_buff.map_id == map_id) + adapter->tx_pool[i].long_term_buff.map_id = 0; + } + for (i = 0; i < rx_subcrqs; i++) { + if (adapter->rx_pool[i].long_term_buff.map_id == map_id) + 
adapter->rx_pool[i].long_term_buff.map_id = 0; + } + } + complete(&adapter->fw_done); +} + +static void handle_request_unmap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->request_unmap_rsp.rc.code; + if (rc) + dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); +} + +static void handle_query_map_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + rc = crq->query_map_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); + return; + } + netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", + crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, + crq->query_map_rsp.free_pages); +} + +static void handle_query_cap_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + atomic_dec(&adapter->running_cap_queries); + netdev_dbg(netdev, "Outstanding queries: %d\n", + atomic_read(&adapter->running_cap_queries)); + rc = crq->query_capability.rc.code; + if (rc) { + dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); + goto out; + } + + switch (be16_to_cpu(crq->query_capability.capability)) { + case MIN_TX_QUEUES: + adapter->min_tx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_tx_queues = %lld\n", + adapter->min_tx_queues); + break; + case MIN_RX_QUEUES: + adapter->min_rx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_queues = %lld\n", + adapter->min_rx_queues); + break; + case MIN_RX_ADD_QUEUES: + adapter->min_rx_add_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_add_queues = %lld\n", + adapter->min_rx_add_queues); + break; + case MAX_TX_QUEUES: + adapter->max_tx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_queues = %lld\n", + adapter->max_tx_queues); + break; + case MAX_RX_QUEUES: + adapter->max_rx_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_queues = %lld\n", + adapter->max_rx_queues); + break; + case MAX_RX_ADD_QUEUES: + adapter->max_rx_add_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_queues = %lld\n", + adapter->max_rx_add_queues); + break; + case MIN_TX_ENTRIES_PER_SUBCRQ: + adapter->min_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", + adapter->min_tx_entries_per_subcrq); + break; + case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->min_rx_add_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", + adapter->min_rx_add_entries_per_subcrq); + break; + case MAX_TX_ENTRIES_PER_SUBCRQ: + adapter->max_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", + adapter->max_tx_entries_per_subcrq); + break; + case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->max_rx_add_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", + adapter->max_rx_add_entries_per_subcrq); + break; + case TCP_IP_OFFLOAD: + adapter->tcp_ip_offload = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "tcp_ip_offload = %lld\n", + 
adapter->tcp_ip_offload); + break; + case PROMISC_SUPPORTED: + adapter->promisc_supported = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "promisc_supported = %lld\n", + adapter->promisc_supported); + break; + case MIN_MTU: + adapter->min_mtu = be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); + break; + case MAX_MTU: + adapter->max_mtu = be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); + break; + case MAX_MULTICAST_FILTERS: + adapter->max_multicast_filters = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_multicast_filters = %lld\n", + adapter->max_multicast_filters); + break; + case VLAN_HEADER_INSERTION: + adapter->vlan_header_insertion = + be32_to_cpu(crq->query_capability.number); + if (adapter->vlan_header_insertion) + netdev->features |= NETIF_F_HW_VLAN_STAG_TX; + netdev_dbg(netdev, "vlan_header_insertion = %lld\n", + adapter->vlan_header_insertion); + break; + case MAX_TX_SG_ENTRIES: + adapter->max_tx_sg_entries = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", + adapter->max_tx_sg_entries); + break; + case RX_SG_SUPPORTED: + adapter->rx_sg_supported = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "rx_sg_supported = %lld\n", + adapter->rx_sg_supported); + break; + case OPT_TX_COMP_SUB_QUEUES: + adapter->opt_tx_comp_sub_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", + adapter->opt_tx_comp_sub_queues); + break; + case OPT_RX_COMP_QUEUES: + adapter->opt_rx_comp_queues = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", + adapter->opt_rx_comp_queues); + break; + case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: + adapter->opt_rx_bufadd_q_per_rx_comp_q = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", + adapter->opt_rx_bufadd_q_per_rx_comp_q); + break; + case OPT_TX_ENTRIES_PER_SUBCRQ: + adapter->opt_tx_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", + adapter->opt_tx_entries_per_subcrq); + break; + case OPT_RXBA_ENTRIES_PER_SUBCRQ: + adapter->opt_rxba_entries_per_subcrq = + be32_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", + adapter->opt_rxba_entries_per_subcrq); + break; + case TX_RX_DESC_REQ: + adapter->tx_rx_desc_req = crq->query_capability.number; + netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", + adapter->tx_rx_desc_req); + break; + + default: + netdev_err(netdev, "Got invalid cap rsp %d\n", + crq->query_capability.capability); + } + +out: + if (atomic_read(&adapter->running_cap_queries) == 0) + complete(&adapter->init_done); + /* We're done querying the capabilities, initialize sub-crqs */ +} + +static void handle_control_ras_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + u8 correlator = crq->control_ras_rsp.correlator; + struct device *dev = &adapter->vdev->dev; + bool found = false; + int i; + + if (crq->control_ras_rsp.rc.code) { + dev_warn(dev, "Control ras failed rc=%d\n", + crq->control_ras_rsp.rc.code); + return; + } + + for (i = 0; i < adapter->ras_comp_num; i++) { + if (adapter->ras_comps[i].correlator == correlator) { + found = true; + break; + } + } + + if (!found) { + dev_warn(dev, "Correlator not found on control_ras_rsp\n"); + return; + } + + switch 
(crq->control_ras_rsp.op) { + case IBMVNIC_TRACE_LEVEL: + adapter->ras_comps[i].trace_level = crq->control_ras.level; + break; + case IBMVNIC_ERROR_LEVEL: + adapter->ras_comps[i].error_check_level = + crq->control_ras.level; + break; + case IBMVNIC_TRACE_PAUSE: + adapter->ras_comp_int[i].paused = 1; + break; + case IBMVNIC_TRACE_RESUME: + adapter->ras_comp_int[i].paused = 0; + break; + case IBMVNIC_TRACE_ON: + adapter->ras_comps[i].trace_on = 1; + break; + case IBMVNIC_TRACE_OFF: + adapter->ras_comps[i].trace_on = 0; + break; + case IBMVNIC_CHG_TRACE_BUFF_SZ: + /* trace_buff_sz is 3 bytes, stuff it into an int */ + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] = + crq->control_ras_rsp.trace_buff_sz[0]; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] = + crq->control_ras_rsp.trace_buff_sz[1]; + ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] = + crq->control_ras_rsp.trace_buff_sz[2]; + break; + default: + dev_err(dev, "invalid op %d on control_ras_rsp", + crq->control_ras_rsp.op); + } +} + +static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_fw_trace_entry *trace; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + dma_addr_t trace_tok; + + if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) + return 0; + + trace = + dma_alloc_coherent(dev, + be32_to_cpu(adapter->ras_comps[num]. + trace_buff_size), &trace_tok, + GFP_KERNEL); + if (!trace) { + dev_err(dev, "Couldn't alloc trace buffer\n"); + return 0; + } + + memset(&crq, 0, sizeof(crq)); + crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD; + crq.collect_fw_trace.cmd = COLLECT_FW_TRACE; + crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; + crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); + crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; + ibmvnic_send_crq(adapter, &crq); + + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + + if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) + len = + be32_to_cpu(adapter->ras_comps[num].trace_buff_size) - + *ppos; + + copy_to_user(user_buf, &((u8 *)trace)[*ppos], len); + + dma_free_coherent(dev, + be32_to_cpu(adapter->ras_comps[num].trace_buff_size), + trace, trace_tok); + *ppos += len; + return len; +} + +static const struct file_operations trace_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_read, +}; + +static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* 1 or 0 plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t paused_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + 
int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + adapter->ras_comp_int[num].paused = val ? 1 : 0; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations paused_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = paused_read, + .write = paused_write, +}; + +static ssize_t tracing_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* 1 or 0 plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t tracing_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = val ? 
IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF; + + return len; +} + +static const struct file_operations tracing_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = tracing_read, + .write = tracing_write, +}; + +static ssize_t error_level_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* decimal max char plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level); + + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t error_level_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + if (val > 9) + val = 9; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = adapter->ras_comps[num].correlator; + crq.control_ras.op = IBMVNIC_ERROR_LEVEL; + crq.control_ras.level = val; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations error_level_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = error_level_read, + .write = error_level_write, +}; + +static ssize_t trace_level_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[5]; /* decimal max char plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level); + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t trace_level_write(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + if (val > 9) + val = 9; + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = + adapter->ras_comps[ras_comp_int->num].correlator; + crq.control_ras.op = IBMVNIC_TRACE_LEVEL; + crq.control_ras.level = val; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations trace_level_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_level_read, + .write = trace_level_write, +}; + +static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf, + size_t len, loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + int num = ras_comp_int->num; + char buff[9]; /* decimal max int plus \n and \0 */ + int size; + + size = sprintf(buff, "%d\n", 
adapter->ras_comps[num].trace_buff_size); + if (*ppos >= size) + return 0; + + copy_to_user(user_buf, buff, size); + *ppos += size; + return size; +} + +static ssize_t trace_buff_size_write(struct file *file, + const char __user *user_buf, size_t len, + loff_t *ppos) +{ + struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; + struct ibmvnic_adapter *adapter = ras_comp_int->adapter; + union ibmvnic_crq crq; + unsigned long val; + char buff[9]; /* decimal max int plus \n and \0 */ + + copy_from_user(buff, user_buf, sizeof(buff)); + val = kstrtoul(buff, 10, NULL); + + memset(&crq, 0, sizeof(crq)); + crq.control_ras.first = IBMVNIC_CRQ_CMD; + crq.control_ras.cmd = CONTROL_RAS; + crq.control_ras.correlator = + adapter->ras_comps[ras_comp_int->num].correlator; + crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ; + /* trace_buff_sz is 3 bytes, stuff an int into it */ + crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5]; + crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6]; + crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7]; + ibmvnic_send_crq(adapter, &crq); + + return len; +} + +static const struct file_operations trace_size_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_fw_comp_open, + .read = trace_buff_size_read, + .write = trace_buff_size_write, +}; + +static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct dentry *dir_ent; + struct dentry *ent; + int i; + + debugfs_remove_recursive(adapter->ras_comps_ent); + + adapter->ras_comps_ent = debugfs_create_dir("ras_comps", + adapter->debugfs_dir); + if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) { + dev_info(dev, "debugfs create ras_comps dir failed\n"); + return; + } + + for (i = 0; i < adapter->ras_comp_num; i++) { + dir_ent = debugfs_create_dir(adapter->ras_comps[i].name, + adapter->ras_comps_ent); + if (!dir_ent || IS_ERR(dir_ent)) { + dev_info(dev, "debugfs create %s dir failed\n", + adapter->ras_comps[i].name); + continue; + } + + adapter->ras_comp_int[i].adapter = adapter; + adapter->ras_comp_int[i].num = i; + adapter->ras_comp_int[i].desc_blob.data = + &adapter->ras_comps[i].description; + adapter->ras_comp_int[i].desc_blob.size = + sizeof(adapter->ras_comps[i].description); + + /* Don't need to remember the dentry's because the debugfs dir + * gets removed recursively + */ + ent = debugfs_create_blob("description", S_IRUGO, dir_ent, + &adapter->ras_comp_int[i].desc_blob); + ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &trace_size_ops); + ent = debugfs_create_file("trace_level", + S_IRUGO | + (adapter->ras_comps[i].trace_level != + 0xFF ? S_IWUSR : 0), + dir_ent, &adapter->ras_comp_int[i], + &trace_level_ops); + ent = debugfs_create_file("error_level", + S_IRUGO | + (adapter-> + ras_comps[i].error_check_level != + 0xFF ? 
S_IWUSR : 0), + dir_ent, &adapter->ras_comp_int[i], + &trace_level_ops); + ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &tracing_ops); + ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR, + dir_ent, &adapter->ras_comp_int[i], + &paused_ops); + ent = debugfs_create_file("trace", S_IRUGO, dir_ent, + &adapter->ras_comp_int[i], + &trace_ops); + } +} + +static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq newcrq; + + adapter->ras_comps = dma_alloc_coherent(dev, len, + &adapter->ras_comps_tok, + GFP_KERNEL); + if (!adapter->ras_comps) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "Couldn't alloc fw comps buffer\n"); + return; + } + + adapter->ras_comp_int = kmalloc(adapter->ras_comp_num * + sizeof(struct ibmvnic_fw_comp_internal), + GFP_KERNEL); + if (!adapter->ras_comp_int) + dma_free_coherent(dev, len, adapter->ras_comps, + adapter->ras_comps_tok); + + memset(&newcrq, 0, sizeof(newcrq)); + newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD; + newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS; + newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok); + newcrq.request_ras_comps.len = cpu_to_be32(len); + ibmvnic_send_crq(adapter, &newcrq); +} + +static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_inflight_cmd *inflight_cmd; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff; + unsigned long flags; + unsigned long flags2; + + spin_lock_irqsave(&adapter->inflight_lock, flags); + list_for_each_entry(inflight_cmd, &adapter->inflight, list) { + switch (inflight_cmd->crq.generic.cmd) { + case LOGIN: + dma_unmap_single(dev, adapter->login_buf_token, + adapter->login_buf_sz, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, + DMA_BIDIRECTIONAL); + kfree(adapter->login_rsp_buf); + kfree(adapter->login_buf); + break; + case REQUEST_DUMP: + complete(&adapter->fw_done); + break; + case REQUEST_ERROR_INFO: + spin_lock_irqsave(&adapter->error_list_lock, flags2); + list_for_each_entry(error_buff, &adapter->errors, + list) { + dma_unmap_single(dev, error_buff->dma, + error_buff->len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + list_del(&error_buff->list); + kfree(error_buff); + } + spin_unlock_irqrestore(&adapter->error_list_lock, + flags2); + break; + } + list_del(&inflight_cmd->list); + kfree(inflight_cmd); + } + spin_unlock_irqrestore(&adapter->inflight_lock, flags); +} + +static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_generic_crq *gen_crq = &crq->generic; + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->vdev->dev; + long rc; + + netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", + ((unsigned long int *)crq)[0], + ((unsigned long int *)crq)[1]); + switch (gen_crq->first) { + case IBMVNIC_CRQ_INIT_RSP: + switch (gen_crq->cmd) { + case IBMVNIC_CRQ_INIT: + dev_info(dev, "Partner initialized\n"); + /* Send back a response */ + rc = ibmvnic_send_crq_init_complete(adapter); + if (rc == 0) + send_version_xchg(adapter); + else + dev_err(dev, "Can't send initrsp rc=%ld\n", rc); + break; + case IBMVNIC_CRQ_INIT_COMPLETE: + dev_info(dev, "Partner initialization complete\n"); + send_version_xchg(adapter); + break; + default: + 
dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); + } + return; + case IBMVNIC_CRQ_XPORT_EVENT: + if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { + dev_info(dev, "Re-enabling adapter\n"); + adapter->migrated = true; + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); + } else { + /* The adapter lost the connection */ + dev_err(dev, "Virtual Adapter failed (rc=%d)\n", + gen_crq->cmd); + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + } + return; + case IBMVNIC_CRQ_CMD_RSP: + break; + default: + dev_err(dev, "Got an invalid msg type 0x%02x\n", + gen_crq->first); + return; + } + + switch (gen_crq->cmd) { + case VERSION_EXCHANGE_RSP: + rc = crq->version_exchange_rsp.rc.code; + if (rc) { + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } + dev_info(dev, "Partner protocol version is %d\n", + crq->version_exchange_rsp.version); + if (be16_to_cpu(crq->version_exchange_rsp.version) < + ibmvnic_version) + ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: + handle_query_cap_rsp(crq, adapter); + break; + case QUERY_MAP_RSP: + handle_query_map_rsp(crq, adapter); + break; + case REQUEST_MAP_RSP: + handle_request_map_rsp(crq, adapter); + break; + case REQUEST_UNMAP_RSP: + handle_request_unmap_rsp(crq, adapter); + break; + case REQUEST_CAPABILITY_RSP: + handle_request_cap_rsp(crq, adapter); + break; + case LOGIN_RSP: + netdev_dbg(netdev, "Got Login Response\n"); + handle_login_rsp(crq, adapter); + break; + case LOGICAL_LINK_STATE_RSP: + netdev_dbg(netdev, "Got Logical Link State Response\n"); + adapter->logical_link_state = + crq->logical_link_state_rsp.link_state; + break; + case LINK_STATE_INDICATION: + netdev_dbg(netdev, "Got Logical Link State Indication\n"); + adapter->phys_link_state = + crq->link_state_indication.phys_link_state; + adapter->logical_link_state = + crq->link_state_indication.logical_link_state; + break; + case CHANGE_MAC_ADDR_RSP: + netdev_dbg(netdev, "Got MAC address change Response\n"); + handle_change_mac_rsp(crq, adapter); + break; + case ERROR_INDICATION: + netdev_dbg(netdev, "Got Error Indication\n"); + handle_error_indication(crq, adapter); + break; + case REQUEST_ERROR_RSP: + netdev_dbg(netdev, "Got Error Detail Response\n"); + handle_error_info_rsp(crq, adapter); + break; + case REQUEST_STATISTICS_RSP: + netdev_dbg(netdev, "Got Statistics Response\n"); + complete(&adapter->stats_done); + break; + case REQUEST_DUMP_SIZE_RSP: + netdev_dbg(netdev, "Got Request Dump Size Response\n"); + handle_dump_size_rsp(crq, adapter); + break; + case REQUEST_DUMP_RSP: + netdev_dbg(netdev, "Got Request Dump Response\n"); + complete(&adapter->fw_done); + break; + case QUERY_IP_OFFLOAD_RSP: + netdev_dbg(netdev, "Got Query IP offload Response\n"); + handle_query_ip_offload_rsp(adapter); + break; + case MULTICAST_CTRL_RSP: + netdev_dbg(netdev, "Got multicast control Response\n"); + break; + case CONTROL_IP_OFFLOAD_RSP: + netdev_dbg(netdev, "Got Control IP offload Response\n"); + dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, + sizeof(adapter->ip_offload_ctrl), + DMA_TO_DEVICE); + /* We're done with the queries, perform the login */ + send_login(adapter); + break; + case REQUEST_RAS_COMP_NUM_RSP: + netdev_dbg(netdev, "Got Request 
RAS Comp Num Response\n"); + if (crq->request_ras_comp_num_rsp.rc.code == 10) { + netdev_dbg(netdev, "Request RAS Comp Num not supported\n"); + break; + } + adapter->ras_comp_num = + be32_to_cpu(crq->request_ras_comp_num_rsp.num_components); + handle_request_ras_comp_num_rsp(crq, adapter); + break; + case REQUEST_RAS_COMPS_RSP: + netdev_dbg(netdev, "Got Request RAS Comps Response\n"); + handle_request_ras_comps_rsp(crq, adapter); + break; + case CONTROL_RAS_RSP: + netdev_dbg(netdev, "Got Control RAS Response\n"); + handle_control_ras_rsp(crq, adapter); + break; + case COLLECT_FW_TRACE_RSP: + netdev_dbg(netdev, "Got Collect firmware trace Response\n"); + complete(&adapter->fw_done); + break; + default: + netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", + gen_crq->cmd); + } +} + +static irqreturn_t ibmvnic_interrupt(int irq, void *instance) +{ + struct ibmvnic_adapter *adapter = instance; + struct ibmvnic_crq_queue *queue = &adapter->crq; + struct vio_dev *vdev = adapter->vdev; + union ibmvnic_crq *crq; + unsigned long flags; + bool done = false; + + spin_lock_irqsave(&queue->lock, flags); + vio_disable_interrupts(vdev); + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } + vio_enable_interrupts(vdev); + crq = ibmvnic_next_crq(adapter); + if (crq) { + vio_disable_interrupts(vdev); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } else { + done = true; + } + } + spin_unlock_irqrestore(&queue->lock, flags); + return IRQ_HANDLED; +} + +static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct vio_dev *vdev = adapter->vdev; + int rc; + + do { + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); + + return rc; +} + +static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct device *dev = &adapter->vdev->dev; + struct vio_dev *vdev = adapter->vdev; + int rc; + + /* Close the CRQ */ + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + /* Clean out the queue */ + memset(crq->msgs, 0, PAGE_SIZE); + crq->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_CLOSED) + /* Adapter is good, but other end is not ready */ + dev_warn(dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); + + return rc; +} + +static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct vio_dev *vdev = adapter->vdev; + long rc; + + netdev_dbg(adapter->netdev, "Releasing CRQ\n"); + free_irq(vdev->irq, adapter); + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_page((unsigned long)crq->msgs); +} + +static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_crq_queue *crq = &adapter->crq; + struct device *dev = &adapter->vdev->dev; + struct vio_dev *vdev = adapter->vdev; + int rc, retrc = -ENOMEM; + + crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); + /* Should we 
allocate more than one page? */ + + if (!crq->msgs) + return -ENOMEM; + + crq->size = PAGE_SIZE / sizeof(*crq->msgs); + crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, crq->msg_token)) + goto map_failed; + + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_RESOURCE) + /* maybe kexecing and resource is busy. try a reset */ + rc = ibmvnic_reset_crq(adapter); + retrc = rc; + + if (rc == H_CLOSED) { + dev_warn(dev, "Partner adapter not ready\n"); + } else if (rc) { + dev_warn(dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + retrc = 0; + + netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); + rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, + adapter); + if (rc) { + dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", + vdev->irq, rc); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc) { + dev_err(dev, "Error %d enabling interrupts\n", rc); + goto req_irq_failed; + } + + crq->cur = 0; + spin_lock_init(&crq->lock); + + return retrc; + +req_irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_crq_failed: + dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)crq->msgs); + return retrc; +} + +/* debugfs for dump */ +static int ibmvnic_dump_show(struct seq_file *seq, void *v) +{ + struct net_device *netdev = seq->private; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + + memset(&crq, 0, sizeof(crq)); + crq.request_dump_size.first = IBMVNIC_CRQ_CMD; + crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; + ibmvnic_send_crq(adapter, &crq); + + init_completion(&adapter->fw_done); + wait_for_completion(&adapter->fw_done); + + seq_write(seq, adapter->dump_data, adapter->dump_data_size); + + dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size, + DMA_BIDIRECTIONAL); + + kfree(adapter->dump_data); + + return 0; +} + +static int ibmvnic_dump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ibmvnic_dump_show, inode->i_private); +} + +static const struct file_operations ibmvnic_dump_ops = { + .owner = THIS_MODULE, + .open = ibmvnic_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) +{ + struct ibmvnic_adapter *adapter; + struct net_device *netdev; + unsigned char *mac_addr_p; + struct dentry *ent; + char buf[16]; /* debugfs name buf */ + int rc; + + dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", + dev->unit_address); + + mac_addr_p = (unsigned char *)vio_get_attribute(dev, + VETH_MAC_ADDR, NULL); + if (!mac_addr_p) { + dev_err(&dev->dev, + "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", + __FILE__, __LINE__); + return 0; + } + + netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), + IBMVNIC_MAX_TX_QUEUES); + if (!netdev) + return -ENOMEM; + + adapter = netdev_priv(netdev); + dev_set_drvdata(&dev->dev, netdev); + adapter->vdev = dev; + adapter->netdev = netdev; + + ether_addr_copy(adapter->mac_addr, mac_addr_p); + ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + netdev->irq = dev->irq; + netdev->netdev_ops = &ibmvnic_netdev_ops; + netdev->ethtool_ops = &ibmvnic_ethtool_ops; + SET_NETDEV_DEV(netdev, &dev->dev); + + 
spin_lock_init(&adapter->stats_lock); + + rc = ibmvnic_init_crq_queue(adapter); + if (rc) { + dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc); + goto free_netdev; + } + + INIT_LIST_HEAD(&adapter->errors); + INIT_LIST_HEAD(&adapter->inflight); + spin_lock_init(&adapter->error_list_lock); + spin_lock_init(&adapter->inflight_lock); + + adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats, + sizeof(struct ibmvnic_statistics), + DMA_FROM_DEVICE); + if (dma_mapping_error(&dev->dev, adapter->stats_token)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(&dev->dev, "Couldn't map stats buffer\n"); + goto free_crq; + } + + snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address); + ent = debugfs_create_dir(buf, NULL); + if (!ent || IS_ERR(ent)) { + dev_info(&dev->dev, "debugfs create directory failed\n"); + adapter->debugfs_dir = NULL; + } else { + adapter->debugfs_dir = ent; + ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir, + netdev, &ibmvnic_dump_ops); + if (!ent || IS_ERR(ent)) { + dev_info(&dev->dev, + "debugfs create dump file failed\n"); + adapter->debugfs_dump = NULL; + } else { + adapter->debugfs_dump = ent; + } + } + ibmvnic_send_crq_init(adapter); + + init_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + /* needed to pull init_sub_crqs outside of an interrupt context + * because it creates IRQ mappings for the subCRQ queues, causing + * a kernel warning + */ + init_sub_crqs(adapter, 0); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + /* if init_sub_crqs is partially successful, retry */ + while (!adapter->tx_scrq || !adapter->rx_scrq) { + init_sub_crqs(adapter, 1); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + } + + netdev->real_num_tx_queues = adapter->req_tx_queues; + + rc = register_netdev(netdev); + if (rc) { + dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + goto free_debugfs; + } + dev_info(&dev->dev, "ibmvnic registered\n"); + + return 0; + +free_debugfs: + if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) + debugfs_remove_recursive(adapter->debugfs_dir); +free_crq: + ibmvnic_release_crq_queue(adapter); +free_netdev: + free_netdev(netdev); + return rc; +} + +static int ibmvnic_remove(struct vio_dev *dev) +{ + struct net_device *netdev = dev_get_drvdata(&dev->dev); + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + + release_sub_crqs(adapter); + + ibmvnic_release_crq_queue(adapter); + + if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) + debugfs_remove_recursive(adapter->debugfs_dir); + + if (adapter->ras_comps) + dma_free_coherent(&dev->dev, + adapter->ras_comp_num * + sizeof(struct ibmvnic_fw_component), + adapter->ras_comps, adapter->ras_comps_tok); + + kfree(adapter->ras_comp_int); + + free_netdev(netdev); + dev_set_drvdata(&dev->dev, NULL); + + return 0; +} + +static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) +{ + struct net_device *netdev = dev_get_drvdata(&vdev->dev); + struct ibmvnic_adapter *adapter; + struct iommu_table *tbl; + unsigned long ret = 0; + int i; + + tbl = get_iommu_table_base(&vdev->dev); + + /* netdev inits at probe time along with the structures we need below*/ + if (!netdev) + return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); + + adapter = netdev_priv(netdev); + + ret += PAGE_SIZE; /* the crq message queue */ + ret += adapter->bounce_buffer_size; + ret += IOMMU_PAGE_ALIGN(sizeof(struct 
ibmvnic_statistics), tbl); + + for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) + ret += 4 * PAGE_SIZE; /* the scrq message queue */ + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) + ret += adapter->rx_pool[i].size * + IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); + + return ret; +} + +static int ibmvnic_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int i; + + /* kick the interrupt handlers just in case we lost an interrupt */ + for (i = 0; i < adapter->req_rx_queues; i++) + ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, + adapter->rx_scrq[i]); + + return 0; +} + +static struct vio_device_id ibmvnic_device_table[] = { + {"network", "IBM,vnic"}, + {"", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); + +static const struct dev_pm_ops ibmvnic_pm_ops = { + .resume = ibmvnic_resume +}; + +static struct vio_driver ibmvnic_driver = { + .id_table = ibmvnic_device_table, + .probe = ibmvnic_probe, + .remove = ibmvnic_remove, + .get_desired_dma = ibmvnic_get_desired_dma, + .name = ibmvnic_driver_name, + .pm = &ibmvnic_pm_ops, +}; + +/* module functions */ +static int __init ibmvnic_module_init(void) +{ + pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, + IBMVNIC_DRIVER_VERSION); + + return vio_register_driver(&ibmvnic_driver); +} + +static void __exit ibmvnic_module_exit(void) +{ + vio_unregister_driver(&ibmvnic_driver); +} + +module_init(ibmvnic_module_init); +module_exit(ibmvnic_module_exit); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h new file mode 100644 index 000000000000..1242925ad34c --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -0,0 +1,1046 @@ +/**************************************************************************/ +/* */ +/* IBM System i and System p Virtual NIC Device Driver */ +/* Copyright (C) 2014 IBM Corp. */ +/* Santiago Leon (santi_leon@yahoo.com) */ +/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ +/* John Allen (jallen@linux.vnet.ibm.com) */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. */ +/* */ +/* This module contains the implementation of a virtual ethernet device */ +/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ +/* option of the RS/6000 Platform Architecture to interface with virtual */ +/* ethernet NICs that are presented to the partition by the hypervisor. 
*/ +/* */ +/**************************************************************************/ + +#define IBMVNIC_NAME "ibmvnic" +#define IBMVNIC_DRIVER_VERSION "1.0" +#define IBMVNIC_INVALID_MAP -1 +#define IBMVNIC_STATS_TIMEOUT 1 +/* basic structures plus 100 2k buffers */ +#define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305 + +/* Initial module_parameters */ +#define IBMVNIC_RX_WEIGHT 16 +/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */ +#define IBMVNIC_BUFFS_PER_POOL 100 +#define IBMVNIC_MAX_TX_QUEUES 5 + +struct ibmvnic_login_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_LB 1 + __be32 num_txcomp_subcrqs; + __be32 off_txcomp_subcrqs; + __be32 num_rxcomp_subcrqs; + __be32 off_rxcomp_subcrqs; + __be32 login_rsp_ioba; + __be32 login_rsp_len; +} __packed __aligned(8); + +struct ibmvnic_login_rsp_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_LRB 1 + __be32 num_txsubm_subcrqs; + __be32 off_txsubm_subcrqs; + __be32 num_rxadd_subcrqs; + __be32 off_rxadd_subcrqs; + __be32 off_rxadd_buff_size; + __be32 num_supp_tx_desc; + __be32 off_supp_tx_desc; +} __packed __aligned(8); + +struct ibmvnic_query_ip_offload_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_IOB 1 + u8 ipv4_chksum; + u8 ipv6_chksum; + u8 tcp_ipv4_chksum; + u8 tcp_ipv6_chksum; + u8 udp_ipv4_chksum; + u8 udp_ipv6_chksum; + u8 large_tx_ipv4; + u8 large_tx_ipv6; + u8 large_rx_ipv4; + u8 large_rx_ipv6; + u8 reserved1[14]; + __be16 max_ipv4_header_size; + __be16 max_ipv6_header_size; + __be16 max_tcp_header_size; + __be16 max_udp_header_size; + __be32 max_large_tx_size; + __be32 max_large_rx_size; + u8 reserved2[16]; + u8 ipv6_extension_header; +#define IPV6_EH_NOT_SUPPORTED 0x00 +#define IPV6_EH_SUPPORTED_LIM 0x01 +#define IPV6_EH_SUPPORTED 0xFF + u8 tcp_pseudosum_req; +#define TCP_PS_NOT_REQUIRED 0x00 +#define TCP_PS_REQUIRED 0x01 + u8 reserved3[30]; + __be16 num_ipv6_ext_headers; + __be32 off_ipv6_ext_headers; + u8 reserved4[154]; +} __packed __aligned(8); + +struct ibmvnic_control_ip_offload_buffer { + __be32 len; + __be32 version; +#define INITIAL_VERSION_IOB 1 + u8 ipv4_chksum; + u8 ipv6_chksum; + u8 tcp_ipv4_chksum; + u8 tcp_ipv6_chksum; + u8 udp_ipv4_chksum; + u8 udp_ipv6_chksum; + u8 large_tx_ipv4; + u8 large_tx_ipv6; + u8 bad_packet_rx; + u8 large_rx_ipv4; + u8 large_rx_ipv6; + u8 reserved4[111]; +} __packed __aligned(8); + +struct ibmvnic_fw_component { + u8 name[48]; + __be32 trace_buff_size; + u8 correlator; + u8 trace_level; + u8 parent_correlator; + u8 error_check_level; + u8 trace_on; + u8 reserved[7]; + u8 description[192]; +} __packed __aligned(8); + +struct ibmvnic_fw_trace_entry { + __be32 trace_id; + u8 num_valid_data; + u8 reserved[3]; + __be64 pmc_registers; + __be64 timebase; + __be64 trace_data[5]; +} __packed __aligned(8); + +struct ibmvnic_statistics { + __be32 version; + __be32 promiscuous; + __be64 rx_packets; + __be64 rx_bytes; + __be64 tx_packets; + __be64 tx_bytes; + __be64 ucast_tx_packets; + __be64 ucast_rx_packets; + __be64 mcast_tx_packets; + __be64 mcast_rx_packets; + __be64 bcast_tx_packets; + __be64 bcast_rx_packets; + __be64 align_errors; + __be64 fcs_errors; + __be64 single_collision_frames; + __be64 multi_collision_frames; + __be64 sqe_test_errors; + __be64 deferred_tx; + __be64 late_collisions; + __be64 excess_collisions; + __be64 internal_mac_tx_errors; + __be64 carrier_sense; + __be64 too_long_frames; + __be64 internal_mac_rx_errors; + u8 reserved[72]; +} __packed __aligned(8); + +struct ibmvnic_acl_buffer { + __be32 len; + __be32 
version; +#define INITIAL_VERSION_IOB 1 + u8 mac_acls_restrict; + u8 vlan_acls_restrict; + u8 reserved1[22]; + __be32 num_mac_addrs; + __be32 offset_mac_addrs; + __be32 num_vlan_ids; + __be32 offset_vlan_ids; + u8 reserved2[80]; +} __packed __aligned(8); + +/* descriptors have been changed, how should this be defined? 1? 4? */ + +#define IBMVNIC_TX_DESC_VERSIONS 3 + +/* is this still needed? */ +struct ibmvnic_tx_comp_desc { + u8 first; + u8 num_comps; + __be16 rcs[5]; + __be32 correlators[5]; +} __packed __aligned(8); + +/* some flags that included in v0 descriptor, which is gone + * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM + * and only in some offload_flags variable that doesn't seem + * to be used anywhere, can probably be removed? + */ + +#define IBMVNIC_TCP_CHKSUM 0x20 +#define IBMVNIC_UDP_CHKSUM 0x08 + +#define IBMVNIC_MAX_FRAGS_PER_CRQ 3 + +struct ibmvnic_tx_desc { + u8 first; + u8 type; + +#define IBMVNIC_TX_DESC 0x10 + u8 n_crq_elem; + u8 n_sge; + u8 flags1; +#define IBMVNIC_TX_COMP_NEEDED 0x80 +#define IBMVNIC_TX_CHKSUM_OFFLOAD 0x40 +#define IBMVNIC_TX_LSO 0x20 +#define IBMVNIC_TX_PROT_TCP 0x10 +#define IBMVNIC_TX_PROT_UDP 0x08 +#define IBMVNIC_TX_PROT_IPV4 0x04 +#define IBMVNIC_TX_PROT_IPV6 0x02 +#define IBMVNIC_TX_VLAN_PRESENT 0x01 + u8 flags2; +#define IBMVNIC_TX_VLAN_INSERT 0x80 + __be16 mss; + u8 reserved[4]; + __be32 correlator; + __be16 vlan_id; + __be16 dma_reg; + __be32 sge_len; + __be64 ioba; +} __packed __aligned(8); + +struct ibmvnic_hdr_desc { + u8 first; + u8 type; +#define IBMVNIC_HDR_DESC 0x11 + u8 len; + u8 l2_len; + __be16 l3_len; + u8 l4_len; + u8 flag; + u8 data[24]; +} __packed __aligned(8); + +struct ibmvnic_hdr_ext_desc { + u8 first; + u8 type; +#define IBMVNIC_HDR_EXT_DESC 0x12 + u8 len; + u8 data[29]; +} __packed __aligned(8); + +struct ibmvnic_sge_desc { + u8 first; + u8 type; +#define IBMVNIC_SGE_DESC 0x30 + __be16 sge1_dma_reg; + __be32 sge1_len; + __be64 sge1_ioba; + __be16 reserved; + __be16 sge2_dma_reg; + __be32 sge2_len; + __be64 sge2_ioba; +} __packed __aligned(8); + +struct ibmvnic_rx_comp_desc { + u8 first; + u8 flags; +#define IBMVNIC_IP_CHKSUM_GOOD 0x80 +#define IBMVNIC_TCP_UDP_CHKSUM_GOOD 0x40 +#define IBMVNIC_END_FRAME 0x20 +#define IBMVNIC_EXACT_MC 0x10 +#define IBMVNIC_VLAN_STRIPPED 0x08 + __be16 off_frame_data; + __be32 len; + __be64 correlator; + __be16 vlan_tci; + __be16 rc; + u8 reserved[12]; +} __packed __aligned(8); + +struct ibmvnic_generic_scrq { + u8 first; + u8 reserved[31]; +} __packed __aligned(8); + +struct ibmvnic_rx_buff_add_desc { + u8 first; + u8 reserved[7]; + __be64 correlator; + __be32 ioba; + u8 map_id; + __be32 len:24; + u8 reserved2[8]; +} __packed __aligned(8); + +struct ibmvnic_rc { + u8 code; /* one of enum ibmvnic_rc_codes */ + u8 detailed_data[3]; +} __packed __aligned(4); + +struct ibmvnic_generic_crq { + u8 first; + u8 cmd; + u8 params[10]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_version_exchange { + u8 first; + u8 cmd; + __be16 version; +#define IBMVNIC_INITIAL_VERSION 1 + u8 reserved[8]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_capability { + u8 first; + u8 cmd; + __be16 capability; /* one of ibmvnic_capabilities */ + struct ibmvnic_rc rc; + __be32 number; /*FIX: should be __be64, but I'm getting the least + * significant word first + */ +} __packed __aligned(8); + +struct ibmvnic_login { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 ioba; + __be32 len; +} __packed __aligned(8); + +struct ibmvnic_phys_parms { + u8 first; + u8 cmd; + u8 
flags1; +#define IBMVNIC_EXTERNAL_LOOPBACK 0x80 +#define IBMVNIC_INTERNAL_LOOPBACK 0x40 +#define IBMVNIC_PROMISC 0x20 +#define IBMVNIC_PHYS_LINK_ACTIVE 0x10 +#define IBMVNIC_AUTONEG_DUPLEX 0x08 +#define IBMVNIC_FULL_DUPLEX 0x04 +#define IBMVNIC_HALF_DUPLEX 0x02 +#define IBMVNIC_CAN_CHG_PHYS_PARMS 0x01 + u8 flags2; +#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80 + __be32 speed; +#define IBMVNIC_AUTONEG 0x80 +#define IBMVNIC_10MBPS 0x40 +#define IBMVNIC_100MBPS 0x20 +#define IBMVNIC_1GBPS 0x10 +#define IBMVNIC_10GBPS 0x08 + __be32 mtu; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_logical_link_state { + u8 first; + u8 cmd; + u8 link_state; +#define IBMVNIC_LOGICAL_LNK_DN 0x00 +#define IBMVNIC_LOGICAL_LNK_UP 0x01 +#define IBMVNIC_LOGICAL_LNK_QUERY 0xff + u8 reserved[9]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_query_ip_offload { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 len; + __be32 ioba; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_control_ip_offload { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_dump_size { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_dump { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_dump_rsp { + u8 first; + u8 cmd; + u8 reserved[6]; + __be32 dumped_len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_ras_comp_num { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 num_components; + u8 reserved2[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_ras_comps { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_control_ras { + u8 first; + u8 cmd; + u8 correlator; + u8 level; + u8 op; +#define IBMVNIC_TRACE_LEVEL 1 +#define IBMVNIC_ERROR_LEVEL 2 +#define IBMVNIC_TRACE_PAUSE 3 +#define IBMVNIC_TRACE_RESUME 4 +#define IBMVNIC_TRACE_ON 5 +#define IBMVNIC_TRACE_OFF 6 +#define IBMVNIC_CHG_TRACE_BUFF_SZ 7 + u8 trace_buff_sz[3]; + u8 reserved[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_collect_fw_trace { + u8 first; + u8 cmd; + u8 correlator; + u8 reserved; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_statistics { + u8 first; + u8 cmd; + u8 flags; +#define IBMVNIC_PHYSICAL_PORT 0x80 + u8 reserved1; + __be32 ioba; + __be32 len; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_request_debug_stats { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_error_indication { + u8 first; + u8 cmd; + u8 flags; +#define IBMVNIC_FATAL_ERROR 0x80 + u8 reserved1; + __be32 error_id; + __be32 detail_error_sz; + __be16 error_cause; + u8 reserved2[2]; +} __packed __aligned(8); + +struct ibmvnic_request_error_info { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 ioba; + __be32 len; + __be32 error_id; +} __packed __aligned(8); + +struct ibmvnic_request_error_rsp { + u8 first; + u8 cmd; + u8 reserved[2]; + __be32 error_id; + __be32 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_link_state_indication { + u8 first; + u8 cmd; + u8 reserved1[2]; + u8 phys_link_state; + u8 logical_link_state; + u8 reserved2[10]; 
+} __packed __aligned(8); + +struct ibmvnic_change_mac_addr { + u8 first; + u8 cmd; + u8 mac_addr[6]; + struct ibmvnic_rc rc; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_multicast_ctrl { + u8 first; + u8 cmd; + u8 mac_addr[6]; + u8 flags; +#define IBMVNIC_ENABLE_MC 0x80 +#define IBMVNIC_DISABLE_MC 0x40 +#define IBMVNIC_ENABLE_ALL 0x20 +#define IBMVNIC_DISABLE_ALL 0x10 + u8 reserved1; + __be16 reserved2; /* was num_enabled_mc_addr; */ + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_get_vpd_size_rsp { + u8 first; + u8 cmd; + u8 reserved[2]; + __be64 len; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_get_vpd { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved[4]; +} __packed __aligned(8); + +struct ibmvnic_acl_change_indication { + u8 first; + u8 cmd; + __be16 change_type; +#define IBMVNIC_MAC_ACL 0 +#define IBMVNIC_VLAN_ACL 1 + u8 reserved[12]; +} __packed __aligned(8); + +struct ibmvnic_acl_query { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_tune { + u8 first; + u8 cmd; + u8 reserved1[2]; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_map { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + __be32 ioba; + __be32 len; + u8 reserved2[4]; +} __packed __aligned(8); + +struct ibmvnic_request_map_rsp { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[4]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_request_unmap { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[12]; +} __packed __aligned(8); + +struct ibmvnic_request_unmap_rsp { + u8 first; + u8 cmd; + u8 reserved1; + u8 map_id; + u8 reserved2[8]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +struct ibmvnic_query_map { + u8 first; + u8 cmd; + u8 reserved[14]; +} __packed __aligned(8); + +struct ibmvnic_query_map_rsp { + u8 first; + u8 cmd; + u8 reserved; + u8 page_size; + __be32 tot_pages; + __be32 free_pages; + struct ibmvnic_rc rc; +} __packed __aligned(8); + +union ibmvnic_crq { + struct ibmvnic_generic_crq generic; + struct ibmvnic_version_exchange version_exchange; + struct ibmvnic_version_exchange version_exchange_rsp; + struct ibmvnic_capability query_capability; + struct ibmvnic_capability query_capability_rsp; + struct ibmvnic_capability request_capability; + struct ibmvnic_capability request_capability_rsp; + struct ibmvnic_login login; + struct ibmvnic_generic_crq login_rsp; + struct ibmvnic_phys_parms query_phys_parms; + struct ibmvnic_phys_parms query_phys_parms_rsp; + struct ibmvnic_phys_parms query_phys_capabilities; + struct ibmvnic_phys_parms query_phys_capabilities_rsp; + struct ibmvnic_phys_parms set_phys_parms; + struct ibmvnic_phys_parms set_phys_parms_rsp; + struct ibmvnic_logical_link_state logical_link_state; + struct ibmvnic_logical_link_state logical_link_state_rsp; + struct ibmvnic_query_ip_offload query_ip_offload; + struct ibmvnic_query_ip_offload query_ip_offload_rsp; + struct ibmvnic_control_ip_offload control_ip_offload; + struct ibmvnic_control_ip_offload control_ip_offload_rsp; + struct ibmvnic_request_dump_size request_dump_size; + struct ibmvnic_request_dump_size request_dump_size_rsp; + struct ibmvnic_request_dump request_dump; + struct ibmvnic_request_dump_rsp request_dump_rsp; + struct ibmvnic_request_ras_comp_num request_ras_comp_num; + struct ibmvnic_request_ras_comp_num request_ras_comp_num_rsp; + struct 
ibmvnic_request_ras_comps request_ras_comps; + struct ibmvnic_request_ras_comps request_ras_comps_rsp; + struct ibmvnic_control_ras control_ras; + struct ibmvnic_control_ras control_ras_rsp; + struct ibmvnic_collect_fw_trace collect_fw_trace; + struct ibmvnic_collect_fw_trace collect_fw_trace_rsp; + struct ibmvnic_request_statistics request_statistics; + struct ibmvnic_generic_crq request_statistics_rsp; + struct ibmvnic_request_debug_stats request_debug_stats; + struct ibmvnic_request_debug_stats request_debug_stats_rsp; + struct ibmvnic_error_indication error_indication; + struct ibmvnic_request_error_info request_error_info; + struct ibmvnic_request_error_rsp request_error_rsp; + struct ibmvnic_link_state_indication link_state_indication; + struct ibmvnic_change_mac_addr change_mac_addr; + struct ibmvnic_change_mac_addr change_mac_addr_rsp; + struct ibmvnic_multicast_ctrl multicast_ctrl; + struct ibmvnic_multicast_ctrl multicast_ctrl_rsp; + struct ibmvnic_generic_crq get_vpd_size; + struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp; + struct ibmvnic_get_vpd get_vpd; + struct ibmvnic_generic_crq get_vpd_rsp; + struct ibmvnic_acl_change_indication acl_change_indication; + struct ibmvnic_acl_query acl_query; + struct ibmvnic_generic_crq acl_query_rsp; + struct ibmvnic_tune tune; + struct ibmvnic_generic_crq tune_rsp; + struct ibmvnic_request_map request_map; + struct ibmvnic_request_map_rsp request_map_rsp; + struct ibmvnic_request_unmap request_unmap; + struct ibmvnic_request_unmap_rsp request_unmap_rsp; + struct ibmvnic_query_map query_map; + struct ibmvnic_query_map_rsp query_map_rsp; +}; + +enum ibmvnic_rc_codes { + SUCCESS = 0, + PARTIALSUCCESS = 1, + PERMISSION = 2, + NOMEMORY = 3, + PARAMETER = 4, + UNKNOWNCOMMAND = 5, + ABORTED = 6, + INVALIDSTATE = 7, + INVALIDIOBA = 8, + INVALIDLENGTH = 9, + UNSUPPORTEDOPTION = 10, +}; + +enum ibmvnic_capabilities { + MIN_TX_QUEUES = 1, + MIN_RX_QUEUES = 2, + MIN_RX_ADD_QUEUES = 3, + MAX_TX_QUEUES = 4, + MAX_RX_QUEUES = 5, + MAX_RX_ADD_QUEUES = 6, + REQ_TX_QUEUES = 7, + REQ_RX_QUEUES = 8, + REQ_RX_ADD_QUEUES = 9, + MIN_TX_ENTRIES_PER_SUBCRQ = 10, + MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11, + MAX_TX_ENTRIES_PER_SUBCRQ = 12, + MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13, + REQ_TX_ENTRIES_PER_SUBCRQ = 14, + REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15, + TCP_IP_OFFLOAD = 16, + PROMISC_REQUESTED = 17, + PROMISC_SUPPORTED = 18, + MIN_MTU = 19, + MAX_MTU = 20, + REQ_MTU = 21, + MAX_MULTICAST_FILTERS = 22, + VLAN_HEADER_INSERTION = 23, + MAX_TX_SG_ENTRIES = 25, + RX_SG_SUPPORTED = 26, + RX_SG_REQUESTED = 27, + OPT_TX_COMP_SUB_QUEUES = 28, + OPT_RX_COMP_QUEUES = 29, + OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30, + OPT_TX_ENTRIES_PER_SUBCRQ = 31, + OPT_RXBA_ENTRIES_PER_SUBCRQ = 32, + TX_RX_DESC_REQ = 33, +}; + +enum ibmvnic_error_cause { + ADAPTER_PROBLEM = 0, + BUS_PROBLEM = 1, + FW_PROBLEM = 2, + DD_PROBLEM = 3, + EEH_RECOVERY = 4, + FW_UPDATED = 5, + LOW_MEMORY = 6, +}; + +enum ibmvnic_commands { + VERSION_EXCHANGE = 0x01, + VERSION_EXCHANGE_RSP = 0x81, + QUERY_CAPABILITY = 0x02, + QUERY_CAPABILITY_RSP = 0x82, + REQUEST_CAPABILITY = 0x03, + REQUEST_CAPABILITY_RSP = 0x83, + LOGIN = 0x04, + LOGIN_RSP = 0x84, + QUERY_PHYS_PARMS = 0x05, + QUERY_PHYS_PARMS_RSP = 0x85, + QUERY_PHYS_CAPABILITIES = 0x06, + QUERY_PHYS_CAPABILITIES_RSP = 0x86, + SET_PHYS_PARMS = 0x07, + SET_PHYS_PARMS_RSP = 0x87, + ERROR_INDICATION = 0x08, + REQUEST_ERROR_INFO = 0x09, + REQUEST_ERROR_RSP = 0x89, + REQUEST_DUMP_SIZE = 0x0A, + REQUEST_DUMP_SIZE_RSP = 0x8A, + REQUEST_DUMP = 0x0B, + REQUEST_DUMP_RSP = 0x8B, + 
LOGICAL_LINK_STATE = 0x0C, + LOGICAL_LINK_STATE_RSP = 0x8C, + REQUEST_STATISTICS = 0x0D, + REQUEST_STATISTICS_RSP = 0x8D, + REQUEST_RAS_COMP_NUM = 0x0E, + REQUEST_RAS_COMP_NUM_RSP = 0x8E, + REQUEST_RAS_COMPS = 0x0F, + REQUEST_RAS_COMPS_RSP = 0x8F, + CONTROL_RAS = 0x10, + CONTROL_RAS_RSP = 0x90, + COLLECT_FW_TRACE = 0x11, + COLLECT_FW_TRACE_RSP = 0x91, + LINK_STATE_INDICATION = 0x12, + CHANGE_MAC_ADDR = 0x13, + CHANGE_MAC_ADDR_RSP = 0x93, + MULTICAST_CTRL = 0x14, + MULTICAST_CTRL_RSP = 0x94, + GET_VPD_SIZE = 0x15, + GET_VPD_SIZE_RSP = 0x95, + GET_VPD = 0x16, + GET_VPD_RSP = 0x96, + TUNE = 0x17, + TUNE_RSP = 0x97, + QUERY_IP_OFFLOAD = 0x18, + QUERY_IP_OFFLOAD_RSP = 0x98, + CONTROL_IP_OFFLOAD = 0x19, + CONTROL_IP_OFFLOAD_RSP = 0x99, + ACL_CHANGE_INDICATION = 0x1A, + ACL_QUERY = 0x1B, + ACL_QUERY_RSP = 0x9B, + REQUEST_DEBUG_STATS = 0x1C, + REQUEST_DEBUG_STATS_RSP = 0x9C, + QUERY_MAP = 0x1D, + QUERY_MAP_RSP = 0x9D, + REQUEST_MAP = 0x1E, + REQUEST_MAP_RSP = 0x9E, + REQUEST_UNMAP = 0x1F, + REQUEST_UNMAP_RSP = 0x9F, + VLAN_CTRL = 0x20, + VLAN_CTRL_RSP = 0xA0, +}; + +enum ibmvnic_crq_type { + IBMVNIC_CRQ_CMD = 0x80, + IBMVNIC_CRQ_CMD_RSP = 0x80, + IBMVNIC_CRQ_INIT_CMD = 0xC0, + IBMVNIC_CRQ_INIT_RSP = 0xC0, + IBMVNIC_CRQ_XPORT_EVENT = 0xFF, +}; + +enum ibmvfc_crq_format { + IBMVNIC_CRQ_INIT = 0x01, + IBMVNIC_CRQ_INIT_COMPLETE = 0x02, + IBMVNIC_PARTITION_MIGRATED = 0x06, +}; + +struct ibmvnic_crq_queue { + union ibmvnic_crq *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +union sub_crq { + struct ibmvnic_generic_scrq generic; + struct ibmvnic_tx_comp_desc tx_comp; + struct ibmvnic_tx_desc v1; + struct ibmvnic_hdr_desc hdr; + struct ibmvnic_hdr_ext_desc hdr_ext; + struct ibmvnic_sge_desc sge; + struct ibmvnic_rx_comp_desc rx_comp; + struct ibmvnic_rx_buff_add_desc rx_add; +}; + +struct ibmvnic_sub_crq_queue { + union sub_crq *msgs; + int size, cur; + dma_addr_t msg_token; + unsigned long crq_num; + unsigned long hw_irq; + unsigned int irq; + unsigned int pool_index; + int scrq_num; + spinlock_t lock; + struct sk_buff *rx_skb_top; + struct ibmvnic_adapter *adapter; +}; + +struct ibmvnic_long_term_buff { + unsigned char *buff; + dma_addr_t addr; + u64 size; + u8 map_id; +}; + +struct ibmvnic_tx_buff { + struct sk_buff *skb; + dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ]; + unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ]; + int index; + int pool_index; + bool last_frag; + bool used_bounce; +}; + +struct ibmvnic_tx_pool { + struct ibmvnic_tx_buff *tx_buff; + int *free_map; + int consumer_index; + int producer_index; + wait_queue_head_t ibmvnic_tx_comp_q; + struct task_struct *work_thread; + struct ibmvnic_long_term_buff long_term_buff; +}; + +struct ibmvnic_rx_buff { + struct sk_buff *skb; + dma_addr_t dma; + unsigned char *data; + int size; + int pool_index; +}; + +struct ibmvnic_rx_pool { + struct ibmvnic_rx_buff *rx_buff; + int size; + int index; + int buff_size; + atomic_t available; + int *free_map; + int next_free; + int next_alloc; + int active; + struct ibmvnic_long_term_buff long_term_buff; +}; + +struct ibmvnic_error_buff { + char *buff; + dma_addr_t dma; + int len; + struct list_head list; + __be32 error_id; +}; + +struct ibmvnic_fw_comp_internal { + struct ibmvnic_adapter *adapter; + int num; + struct debugfs_blob_wrapper desc_blob; + int paused; +}; + +struct ibmvnic_inflight_cmd { + union ibmvnic_crq crq; + struct list_head list; +}; + +struct ibmvnic_adapter { + struct vio_dev *vdev; + struct net_device *netdev; + struct ibmvnic_crq_queue crq; + u8 
mac_addr[ETH_ALEN]; + struct ibmvnic_query_ip_offload_buffer ip_offload_buf; + dma_addr_t ip_offload_tok; + struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; + dma_addr_t ip_offload_ctrl_tok; + bool migrated; + u32 msg_enable; + void *bounce_buffer; + int bounce_buffer_size; + dma_addr_t bounce_buffer_dma; + + /* Statistics */ + struct net_device_stats net_stats; + struct ibmvnic_statistics stats; + dma_addr_t stats_token; + struct completion stats_done; + spinlock_t stats_lock; + int replenish_no_mem; + int replenish_add_buff_success; + int replenish_add_buff_failure; + int replenish_task_cycles; + int tx_send_failed; + int tx_map_failed; + + int phys_link_state; + int logical_link_state; + + /* login data */ + struct ibmvnic_login_buffer *login_buf; + dma_addr_t login_buf_token; + int login_buf_sz; + + struct ibmvnic_login_rsp_buffer *login_rsp_buf; + dma_addr_t login_rsp_buf_token; + int login_rsp_buf_sz; + + atomic_t running_cap_queries; + + struct ibmvnic_sub_crq_queue **tx_scrq; + struct ibmvnic_sub_crq_queue **rx_scrq; + int requested_caps; + + /* rx structs */ + struct napi_struct *napi; + struct ibmvnic_rx_pool *rx_pool; + u64 promisc; + + struct ibmvnic_tx_pool *tx_pool; + bool closing; + struct completion init_done; + + struct list_head errors; + spinlock_t error_list_lock; + + /* debugfs */ + struct dentry *debugfs_dir; + struct dentry *debugfs_dump; + struct completion fw_done; + char *dump_data; + dma_addr_t dump_data_token; + int dump_data_size; + int ras_comp_num; + struct ibmvnic_fw_component *ras_comps; + struct ibmvnic_fw_comp_internal *ras_comp_int; + dma_addr_t ras_comps_tok; + struct dentry *ras_comps_ent; + + /* in-flight commands that allocate and/or map memory*/ + struct list_head inflight; + spinlock_t inflight_lock; + + /* partner capabilities */ + u64 min_tx_queues; + u64 min_rx_queues; + u64 min_rx_add_queues; + u64 max_tx_queues; + u64 max_rx_queues; + u64 max_rx_add_queues; + u64 req_tx_queues; + u64 req_rx_queues; + u64 req_rx_add_queues; + u64 min_tx_entries_per_subcrq; + u64 min_rx_add_entries_per_subcrq; + u64 max_tx_entries_per_subcrq; + u64 max_rx_add_entries_per_subcrq; + u64 req_tx_entries_per_subcrq; + u64 req_rx_add_entries_per_subcrq; + u64 tcp_ip_offload; + u64 promisc_requested; + u64 promisc_supported; + u64 min_mtu; + u64 max_mtu; + u64 req_mtu; + u64 max_multicast_filters; + u64 vlan_header_insertion; + u64 max_tx_sg_entries; + u64 rx_sg_supported; + u64 rx_sg_requested; + u64 opt_tx_comp_sub_queues; + u64 opt_rx_comp_queues; + u64 opt_rx_bufadd_q_per_rx_comp_q; + u64 opt_tx_entries_per_subcrq; + u64 opt_rxba_entries_per_subcrq; + __be64 tx_rx_desc_req; + u8 map_id; +}; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 4163b16489b3..fa593dd3efe1 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -280,6 +280,16 @@ config I40E_VXLAN Say Y here if you want to use Virtual eXtensible Local Area Network (VXLAN) in the driver. +config I40E_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) Support" + depends on I40E && GENEVE && !(I40E=y && GENEVE=m) + default n + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use GENEVE in the driver. 
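The I40E_GENEVE entry just above uses the usual Kconfig guard for optional tunnel offload: the "!(I40E=y && GENEVE=m)" term keeps the option from being enabled when i40e is built in but GENEVE is a module, since built-in code cannot call into a modular subsystem. A purely illustrative sketch of how driver code is normally compiled out on such a symbol follows; the function and message are hypothetical and not taken from this patch.

#include <linux/kconfig.h>
#include <linux/netdevice.h>

#if IS_ENABLED(CONFIG_I40E_GENEVE)
/* Compiled only when the Kconfig symbol above is set, so these GENEVE
 * hooks can never be referenced from a built-in i40e while GENEVE
 * itself is modular.
 */
static void example_note_geneve_port(struct net_device *netdev, __be16 port)
{
	netdev_info(netdev, "GENEVE UDP port %u added\n", be16_to_cpu(port));
}
#endif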
+ config I40E_DCB bool "Data Center Bridging (DCB) Support" default n diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 69707108d23c..98fe5a2cd6e3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -213,8 +213,11 @@ struct e1000_rx_ring { }; #define E1000_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) \ - ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) #define E1000_RX_DESC_EXT(R, i) \ (&(((union e1000_rx_desc_extended *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index b1af0d613caa..8172cf08cc33 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,5 +1,5 @@ /******************************************************************************* - +* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2006 Intel Corporation. @@ -106,7 +106,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { 120, 120 }; -static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_MUTEX(e1000_eeprom_lock); static DEFINE_SPINLOCK(e1000_phy_lock); /** @@ -624,8 +624,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) /* Workaround for PCI-X problem when BIOS sets MMRBC * incorrectly. */ - if (hw->bus_type == e1000_bus_type_pcix - && e1000_pcix_get_mmrbc(hw) > 2048) + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) e1000_pcix_set_mmrbc(hw, 2048); break; } @@ -683,10 +683,9 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) } ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, - &eeprom_data); - if (ret_val) { + &eeprom_data); + if (ret_val) return ret_val; - } if (eeprom_data != EEPROM_RESERVED_WORD) { /* Adjust SERDES output amplitude only. */ @@ -1074,8 +1073,8 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) if (hw->mac_type <= e1000_82543 || hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) hw->phy_reset_disable = false; return E1000_SUCCESS; @@ -1652,7 +1651,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_1000t_ctrl_reg = 0; } else { ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, - mii_1000t_ctrl_reg); + mii_1000t_ctrl_reg); if (ret_val) return ret_val; } @@ -1881,10 +1880,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) - && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ret_val = e1000_polarity_reversal_workaround(hw); if (ret_val) return ret_val; @@ -2084,11 +2084,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. 
*/ - if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_internal_serdes) - && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_copper) - && (!hw->autoneg))) { + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { ret_val = e1000_force_mac_fc(hw); if (ret_val) { e_dbg("Error forcing flow control settings\n"); @@ -2193,8 +2194,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_TX_PAUSE; e_dbg ("Flow Control = TX PAUSE frames only.\n"); @@ -2210,8 +2210,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = E1000_FC_RX_PAUSE; e_dbg ("Flow Control = RX PAUSE frames only.\n"); @@ -2460,10 +2459,11 @@ s32 e1000_check_for_link(struct e1000_hw *hw) * happen due to the execution of this workaround. */ - if ((hw->mac_type == e1000_82544 - || hw->mac_type == e1000_82543) && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ew32(IMC, 0xffffffff); ret_val = e1000_polarity_reversal_workaround(hw); @@ -2528,8 +2528,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) */ if (hw->tbi_compatibility_en) { u16 speed, duplex; + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { e_dbg ("Error getting link speed and duplex\n"); @@ -2628,10 +2630,10 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); if (ret_val) return ret_val; - if ((*speed == SPEED_100 - && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) - || (*speed == SPEED_10 - && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) *duplex = HALF_DUPLEX; } } @@ -2664,9 +2666,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); if (ret_val) return ret_val; - if (phy_data & MII_SR_AUTONEG_COMPLETE) { + if (phy_data & MII_SR_AUTONEG_COMPLETE) return E1000_SUCCESS; - } + msleep(100); } return E1000_SUCCESS; @@ -2803,11 +2805,11 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) return data; } - /** * e1000_read_phy_reg - read a phy register * @hw: Struct containing variables accessed by shared code * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register * * Reads the value from a PHY register, if the value is on a specific non zero * page, sets the page first. 
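The flow-control hunks above only reflow existing conditions, but the logic they carry is the standard 802.3 pause resolution: the outcome (symmetric pause, TX-only, RX-only or none) falls out of the local and link-partner PAUSE and ASM_DIR advertisement bits. A minimal sketch of that decision table, with names local to this note rather than the driver's E1000_FC_* values:

#include <linux/types.h>

enum toy_fc_mode { TOY_FC_NONE, TOY_FC_RX_PAUSE, TOY_FC_TX_PAUSE, TOY_FC_FULL };

static enum toy_fc_mode toy_resolve_fc(bool adv_pause, bool adv_asm,
				       bool lp_pause, bool lp_asm)
{
	if (adv_pause && lp_pause)
		return TOY_FC_FULL;	/* both ends advertise symmetric pause */
	if (!adv_pause && adv_asm && lp_pause && lp_asm)
		return TOY_FC_TX_PAUSE;	/* we may send PAUSE but will not honour it */
	if (adv_pause && adv_asm && !lp_pause && lp_asm)
		return TOY_FC_RX_PAUSE;	/* we honour PAUSE but will not send it */
	return TOY_FC_NONE;
}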
@@ -2823,14 +2825,13 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (u16) reg_addr); - if (ret_val) { - spin_unlock_irqrestore(&e1000_phy_lock, flags); - return ret_val; - } + if (ret_val) + goto out; } ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, phy_data); +out: spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2881,7 +2882,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Read Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } else { mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | @@ -2906,7 +2907,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } - *phy_data = (u16) mdic; + *phy_data = (u16)mdic; } } else { /* We must first send a preamble through the MDIO pin to signal @@ -2960,7 +2961,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) if ((hw->phy_type == e1000_phy_igp) && (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, - (u16) reg_addr); + (u16)reg_addr); if (ret_val) { spin_unlock_irqrestore(&e1000_phy_lock, flags); return ret_val; @@ -2993,7 +2994,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, * the desired data. */ if (hw->mac_type == e1000_ce4100) { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (INTEL_CE_GBE_MDIC_OP_WRITE) | @@ -3015,7 +3016,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, return -E1000_ERR_PHY; } } else { - mdic = (((u32) phy_data) | + mdic = (((u32)phy_data) | (reg_addr << E1000_MDIC_REG_SHIFT) | (phy_addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); @@ -3053,7 +3054,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); mdic <<= 16; - mdic |= (u32) phy_data; + mdic |= (u32)phy_data; e1000_shift_out_mdi_bits(hw, mdic, 32); } @@ -3176,14 +3177,14 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) if (ret_val) return ret_val; - hw->phy_id = (u32) (phy_id_high << 16); + hw->phy_id = (u32)(phy_id_high << 16); udelay(20); ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); if (ret_val) return ret_val; - hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); - hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; switch (hw->mac_type) { case e1000_82543: @@ -3401,7 +3402,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - } return E1000_SUCCESS; @@ -3449,7 +3449,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) if (hw->phy_type == e1000_phy_igp) return e1000_phy_igp_get_info(hw, phy_info); else if ((hw->phy_type == e1000_phy_8211) || - (hw->phy_type == e1000_phy_8201)) + (hw->phy_type == e1000_phy_8201)) return E1000_SUCCESS; else return e1000_phy_m88_get_info(hw, phy_info); @@ -3611,11 +3611,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) */ mask = 0x01 << (count - 1); eecd = er32(EECD); - if (eeprom->type == e1000_eeprom_microwire) { + if (eeprom->type == e1000_eeprom_microwire) eecd &= ~E1000_EECD_DO; - } else if (eeprom->type == e1000_eeprom_spi) { + else if (eeprom->type == e1000_eeprom_spi) eecd |= E1000_EECD_DO; - } + do { /* A "1" is shifted out to the EEPROM by setting bit "DI" to a * "1", and then raising and then lowering the clock (the SK bit @@ -3851,7 +3851,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) do { e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, hw->eeprom.opcode_bits); - spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) break; @@ -3882,9 +3882,10 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_read_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -3896,15 +3897,16 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds. Words = %d," "size = %d\n", offset, eeprom->word_size); return -E1000_ERR_EEPROM; @@ -3940,7 +3942,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, /* Send the READ command (opcode + addr) */ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), eeprom->address_bits); /* Read the data. The address of the eeprom internally @@ -3960,7 +3962,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + i), + e1000_shift_out_ee_bits(hw, (u16)(offset + i), eeprom->address_bits); /* Read the data. 
For microwire, each word requires the @@ -3968,6 +3970,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, */ data[i] = e1000_shift_in_ee_bits(hw, 16); e1000_standby_eeprom(hw); + cond_resched(); } } @@ -4004,7 +4007,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) return E1000_SUCCESS; #endif - if (checksum == (u16) EEPROM_SUM) + if (checksum == (u16)EEPROM_SUM) return E1000_SUCCESS; else { e_dbg("EEPROM Checksum Invalid\n"); @@ -4031,7 +4034,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) } checksum += eeprom_data; } - checksum = (u16) EEPROM_SUM - checksum; + checksum = (u16)EEPROM_SUM - checksum; if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { e_dbg("EEPROM Write Error\n"); return -E1000_ERR_EEPROM; @@ -4052,9 +4055,10 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret; - spin_lock(&e1000_eeprom_lock); + + mutex_lock(&e1000_eeprom_lock); ret = e1000_do_write_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); + mutex_unlock(&e1000_eeprom_lock); return ret; } @@ -4066,15 +4070,16 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (hw->mac_type == e1000_ce4100) { GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, - data); + data); return E1000_SUCCESS; } /* A check for invalid values: offset too large, too many words, and * not enough words. */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { e_dbg("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } @@ -4116,6 +4121,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_EEPROM; e1000_standby_eeprom(hw); + cond_resched(); /* Send the WRITE ENABLE command (8 bit opcode ) */ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, @@ -4132,7 +4138,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, /* Send the Write command (8-bit opcode + addr) */ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), eeprom->address_bits); /* Send the data */ @@ -4142,6 +4148,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, */ while (widx < words) { u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); e1000_shift_out_ee_bits(hw, word_out, 16); widx++; @@ -4183,9 +4190,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM into write/erase mode. 
*/ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); /* Prepare the EEPROM */ e1000_standby_eeprom(hw); @@ -4195,7 +4202,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), eeprom->address_bits); /* Send the data */ @@ -4224,6 +4231,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, /* Recover from write */ e1000_standby_eeprom(hw); + cond_resched(); words_written++; } @@ -4235,9 +4243,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, * EEPROM out of write/erase mode. */ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); + (u16)(eeprom->opcode_bits + 2)); - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); return E1000_SUCCESS; } @@ -4260,8 +4268,8 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw) e_dbg("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } - hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); - hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); } switch (hw->mac_type) { @@ -4328,19 +4336,19 @@ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) */ case 0: /* [47:36] i.e. 0x563 for above example address */ - hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* [46:35] i.e. 0xAC6 for above example address */ - hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* [45:34] i.e. 0x5D8 for above example address */ - hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* [43:32] i.e. 0x634 for above example address */ - hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; } @@ -4361,9 +4369,9 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx * unit hang. 
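The EEPROM changes above belong together: e1000_eeprom_lock is converted from a spinlock to a mutex, and cond_resched() calls are added inside the word-by-word read and write loops. Rescheduling (or sleeping) is not allowed while a spinlock is held, so the lock has to become a mutex once those loops yield the CPU. A minimal sketch of the resulting pattern, assuming a simplified word reader rather than the driver's bit-banged accessors:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_eeprom_lock);	/* sketch-local, not the driver's lock */

static void example_read_words(u16 *buf, int words)
{
	int i;

	mutex_lock(&example_eeprom_lock);	/* process context, may sleep */
	for (i = 0; i < words; i++) {
		buf[i] = 0;		/* stand-in for one bit-banged EEPROM word read */
		cond_resched();		/* fine under a mutex; a bug under a spinlock */
	}
	mutex_unlock(&example_eeprom_lock);
}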
@@ -4537,7 +4545,7 @@ s32 e1000_setup_led(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - (u16) (hw->phy_spd_default & + (u16)(hw->phy_spd_default & ~IGP01E1000_GMII_SPD)); if (ret_val) return ret_val; @@ -4802,7 +4810,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw) void e1000_update_adaptive(struct e1000_hw *hw) { if (hw->adaptive_ifs) { - if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { if (hw->tx_packet_delta > MIN_NUM_XMITS) { hw->in_ifs_mode = true; if (hw->current_ifs_val < hw->ifs_max_val) { @@ -4816,8 +4824,8 @@ void e1000_update_adaptive(struct e1000_hw *hw) } } } else { - if (hw->in_ifs_mode - && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { hw->current_ifs_val = 0; hw->in_ifs_mode = false; ew32(AIT, 0); @@ -4922,7 +4930,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Use old method for Phy older than IGP */ if (hw->phy_type == e1000_phy_m88) { - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) @@ -4966,7 +4973,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) @@ -4976,8 +4982,8 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, /* Value bound check. */ if ((cur_agc_value >= - IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) - || (cur_agc_value == 0)) + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) return -E1000_ERR_PHY; agc_value += cur_agc_value; @@ -5054,7 +5060,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, */ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - /* Read the GIG initialization PCS register (0x00B4) */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, @@ -5175,8 +5180,8 @@ static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) hw->ffe_config_state = e1000_ffe_config_active; ret_val = e1000_write_phy_reg(hw, - IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_CM_CP); + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); if (ret_val) return ret_val; break; @@ -5243,7 +5248,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { @@ -5264,7 +5269,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5299,7 +5304,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; ret_val = @@ -5309,7 +5314,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) return ret_val; ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5346,9 +5351,8 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw) ret_val = 
e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data); - if (ret_val) { + if (ret_val) return ret_val; - } if ((eeprom_data != EEPROM_RESERVED_WORD) && (eeprom_data & EEPROM_PHY_CLASS_A)) { @@ -5395,8 +5399,8 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) * from the lowest speeds starting from 10Mbps. The capability is used * for Dx transitions and states */ - if (hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); if (ret_val) @@ -5446,11 +5450,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) if (ret_val) return ret_val; } - } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) - || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) - || (hw->autoneg_advertised == - AUTONEG_ADVERTISE_10_100_ALL)) { - + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { phy_data |= IGP01E1000_GMII_FLEX_SPD; @@ -5474,7 +5476,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) phy_data); if (ret_val) return ret_val; - } return E1000_SUCCESS; } @@ -5542,7 +5543,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw) return E1000_SUCCESS; } - /** * e1000_enable_mng_pass_thru - check for bmc pass through * @hw: Struct containing variables accessed by shared code diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index fd7be860c201..3fc7bde699ba 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -99,13 +99,13 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); void e1000_free_all_tx_resources(struct e1000_adapter *adapter); void e1000_free_all_rx_resources(struct e1000_adapter *adapter); static int e1000_setup_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *txdr); + struct e1000_tx_ring *txdr); static int e1000_setup_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rxdr); + struct e1000_rx_ring *rxdr); static void e1000_free_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_free_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); void e1000_update_stats(struct e1000_adapter *adapter); static int e1000_init_module(void); @@ -122,16 +122,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter); static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); static void e1000_clean_tx_ring(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); + struct e1000_tx_ring *tx_ring); static void e1000_clean_rx_ring(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); static void e1000_set_rx_mode(struct net_device *netdev); static void e1000_update_phy_info_task(struct work_struct *work); static void e1000_watchdog(struct work_struct *work); static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 
-static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static struct net_device_stats *e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); static irqreturn_t e1000_intr(int irq, void *data); @@ -164,7 +164,7 @@ static void e1000_tx_timeout(struct net_device *dev); static void e1000_reset_task(struct work_struct *work); static void e1000_smartspeed(struct e1000_adapter *adapter); static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, - struct sk_buff *skb); + struct sk_buff *skb); static bool e1000_vlan_used(struct e1000_adapter *adapter); static void e1000_vlan_mode(struct net_device *netdev, @@ -195,7 +195,7 @@ MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive"); static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); + pci_channel_state_t state); static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); static void e1000_io_resume(struct pci_dev *pdev); @@ -287,7 +287,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) int err; err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, - netdev); + netdev); if (err) { e_err(probe, "Unable to allocate interrupt Error: %d\n", err); } @@ -636,8 +636,8 @@ void e1000_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (hw->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -943,8 +943,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct e1000_adapter *adapter; struct e1000_hw *hw; - static int cards_found = 0; - static int global_quad_port_a = 0; /* global ksp3 port a indication */ + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ int i, err, pci_using_dac; u16 eeprom_data = 0; u16 tmp = 0; @@ -1046,7 +1046,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac_type == e1000_ce4100) { hw->ce4100_gbe_mdio_base_virt = ioremap(pci_resource_start(pdev, BAR_1), - pci_resource_len(pdev, BAR_1)); + pci_resource_len(pdev, BAR_1)); if (!hw->ce4100_gbe_mdio_base_virt) goto err_mdio_ioremap; @@ -1148,7 +1148,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; case e1000_82546: case e1000_82546_rev_3: - if (er32(STATUS) & E1000_STATUS_FUNC_1){ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { e1000_read_eeprom(hw, EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; @@ -1199,13 +1199,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 32; i++) { hw->phy_addr = i; e1000_read_phy_reg(hw, PHY_ID2, &tmp); - if (tmp == 0 || tmp == 0xFF) { - if (i == 31) - goto err_eeprom; - continue; - } else + + if (tmp != 0 && tmp != 0xFF) break; } + + if (i >= 32) + goto err_eeprom; } /* reset the hardware with the new settings */ @@ -1263,7 +1263,7 @@ err_pci_reg: * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. 
That could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ @@ -1334,12 +1334,12 @@ static int e1000_sw_init(struct e1000_adapter *adapter) static int e1000_alloc_queues(struct e1000_adapter *adapter) { adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct e1000_tx_ring), GFP_KERNEL); + sizeof(struct e1000_tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct e1000_rx_ring), GFP_KERNEL); + sizeof(struct e1000_rx_ring), GFP_KERNEL); if (!adapter->rx_ring) { kfree(adapter->tx_ring); return -ENOMEM; @@ -1811,20 +1811,20 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; switch (adapter->rx_buffer_len) { - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~E1000_RCTL_BSEX; - break; - case E1000_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case E1000_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case E1000_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384; - break; + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; } /* This is useful for sniffing bad packets. */ @@ -1861,12 +1861,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->netdev->mtu > ETH_DATA_LEN) { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_jumbo_rx_irq; adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -2761,7 +2761,9 @@ static int e1000_tso(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (++i == tx_ring->count) i = 0; + if (++i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2816,7 +2818,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; + tx_ring->next_to_use = i; return true; @@ -2865,8 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, * packet is smaller than 2048 - 16 - 16 (or 2016) bytes */ if (unlikely((hw->bus_type == e1000_bus_type_pcix) && - (size > 2015) && count == 0)) - size = 2015; + (size > 2015) && count == 0)) + size = 2015; /* Workaround for potential 82544 hang in PCI-X. Avoid * terminating buffers within evenly-aligned dwords. 
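Several of the e1000_main.c hunks above and below only restyle the circular-index advance used while walking the Tx and Rx descriptor rings, splitting the old single-line wrap onto two lines. As a rough sketch of the pattern being reformatted (the helper below is purely illustrative; the driver open-codes the comparison against the ring's count field rather than defining such a function):

    /* advance a descriptor ring index, wrapping back to 0 at the end */
    static inline unsigned int ring_idx_next(unsigned int i, unsigned int count)
    {
            if (++i == count)
                    i = 0;
            return i;
    }

Keeping the index inside [0, count) this way is what lets buffer_info and the descriptor array be addressed directly with i after every advance.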
@@ -2963,7 +2967,7 @@ dma_error: count--; while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -3013,7 +3017,8 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_desc->lower.data = cpu_to_le32(txd_lower | buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); @@ -3101,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, return __e1000_maybe_stop_tx(netdev, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -3841,7 +3846,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; @@ -3869,14 +3874,18 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; - if (unlikely(++i == tx_ring->count)) i = 0; + if (unlikely(++i == tx_ring->count)) + i = 0; } eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } - tx_ring->next_to_clean = i; + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); netdev_completed_queue(netdev, pkts_compl, bytes_compl); @@ -3954,9 +3963,11 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, skb_checksum_none_assert(skb); /* 82543 or newer only */ - if (unlikely(hw->mac_type < e1000_82543)) return; + if (unlikely(hw->mac_type < e1000_82543)) + return; /* Ignore Checksum bit is set */ - if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; /* TCP/UDP checksum error bit is set */ if (unlikely(errors & E1000_RXD_ERR_TCPE)) { /* let the stack verify checksum errors */ @@ -4136,7 +4147,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4153,7 +4164,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, status = rx_desc->status; - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4356,7 +4369,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); @@ -4395,7 +4408,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, buffer_info->rxbuf.data = NULL; } - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); prefetch(next_rxd); @@ -4683,9 +4698,11 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) * we assume back-to-back 
*/ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); if (phy_ctrl & CR_1000T_MS_ENABLE) { phy_ctrl &= ~CR_1000T_MS_ENABLE; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 133d4074dbe4..f7c7804d79e5 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -441,12 +441,13 @@ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ #define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ -#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_ICS_OTHER E1000_ICR_OTHER /* Other Interrupt */ /* Transmit Descriptor Control */ #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0b748d1959d9..1dc293bad87b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -480,7 +480,7 @@ extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); -int e1000e_up(struct e1000_adapter *adapter); +void e1000e_up(struct e1000_adapter *adapter); void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); void e1000e_reset(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index c9da4654e9ca..b3949d5bef5c 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -91,6 +91,7 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */ #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 91a5a0ae9cd7..a049e30639a1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1984,7 +1984,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) int i = 0; while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && - (i++ < 10)) + (i++ < 30)) usleep_range(10000, 20000); return blocked ? 
E1000_BLK_PHY_RESET : 0; } @@ -3093,24 +3093,45 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; u8 sig_byte = 0; s32 ret_val; switch (hw->mac.type) { - /* In SPT, read from the CTRL_EXT reg instead of - * accessing the sector valid bits from the nvm - */ case e1000_pch_spt: - *bank = er32(CTRL_EXT) - & E1000_CTRL_EXT_NVMVS; - if ((*bank == 0) || (*bank == 1)) { - e_dbg("ERROR: No valid NVM bank present\n"); - return -E1000_ERR_NVM; - } else { - *bank = *bank - 2; + bank1_offset = nvm->flash_bank_size; + act_offset = E1000_ICH_NVM_SIG_WORD; + + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; return 0; } - break; + + /* Check bank 1 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + + bank1_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0a854a47d31a..c71ba1bfc1ec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1905,30 +1905,15 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - if (!(icr & E1000_ICR_INT_ASSERTED)) { - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); - return IRQ_NONE; - } - if (icr & adapter->eiac_mask) - ew32(ICS, (icr & adapter->eiac_mask)); + hw->mac.get_link_status = true; - if (icr & E1000_ICR_OTHER) { - if (!(icr & E1000_ICR_LSC)) - goto no_link_interrupt; - hw->mac.get_link_status = true; - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) { + mod_timer(&adapter->watchdog_timer, jiffies + 1); + ew32(IMS, E1000_IMS_OTHER); } -no_link_interrupt: - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); - return IRQ_HANDLED; } @@ -1946,6 +1931,9 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) /* Ring was not completely cleaned, so fire another interrupt */ ew32(ICS, tx_ring->ims_val); + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, adapter->tx_ring->ims_val); + return IRQ_HANDLED; } @@ -1959,8 +1947,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) * previous interrupt. */ if (rx_ring->set_itr) { - writel(1000000000 / (rx_ring->itr_val * 256), - rx_ring->itr_register); + u32 itr = rx_ring->itr_val ? 
+ 1000000000 / (rx_ring->itr_val * 256) : 0; + + writel(itr, rx_ring->itr_register); rx_ring->set_itr = 0; } @@ -2025,6 +2015,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ ivar |= (1 << 31); @@ -2032,12 +2023,8 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ew32(IVAR, ivar); /* enable MSI-X PBA support */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; - - /* Auto-Mask Other interrupts upon ICR read */ - ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); - ctrl_ext |= E1000_CTRL_EXT_EIAME; + ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); } @@ -2253,7 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); } else if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); @@ -4144,10 +4131,24 @@ void e1000e_reset(struct e1000_adapter *adapter) } -int e1000e_up(struct e1000_adapter *adapter) +/** + * e1000e_trigger_lsc - trigger an LSC interrupt + * @adapter: + * + * Fire a link status change interrupt to start the watchdog. + **/ +static void e1000e_trigger_lsc(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_OTHER); + else + ew32(ICS, E1000_ICS_LSC); +} + +void e1000e_up(struct e1000_adapter *adapter) +{ /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); @@ -4159,13 +4160,7 @@ int e1000e_up(struct e1000_adapter *adapter) netif_start_queue(adapter->netdev); - /* fire a link change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); - - return 0; + e1000e_trigger_lsc(adapter); } static void e1000e_flush_descriptors(struct e1000_adapter *adapter) @@ -4590,11 +4585,7 @@ static int e1000_open(struct net_device *netdev) hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); - /* fire a link status change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); + e1000e_trigger_lsc(adapter); return 0; @@ -6631,7 +6622,7 @@ static int e1000e_pm_runtime_resume(struct device *dev) return rc; if (netdev->flags & IFF_UP) - rc = e1000e_up(adapter); + e1000e_up(adapter); return rc; } @@ -6822,13 +6813,8 @@ static void e1000_io_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) { - if (e1000e_up(adapter)) { - dev_err(&pdev->dev, - "can't bring device back up after reset\n"); - return; - } - } + if (netif_running(netdev)) + e1000e_up(adapter); netif_device_attach(netdev); @@ -7465,6 +7451,7 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* 
terminate list */ }; @@ -7504,14 +7491,11 @@ static struct pci_driver e1000_driver = { **/ static int __init e1000_init_module(void) { - int ret; - pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); - ret = pci_register_driver(&e1000_driver); - return ret; + return pci_register_driver(&e1000_driver); } module_init(e1000_init_module); diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 08859dd220a8..b006ff66d028 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2014 Intel Corporation. +# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -27,7 +27,17 @@ obj-$(CONFIG_FM10K) += fm10k.o -fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \ - fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \ - fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \ - fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o +fm10k-y := fm10k_main.o \ + fm10k_common.o \ + fm10k_pci.o \ + fm10k_ptp.o \ + fm10k_netdev.o \ + fm10k_ethtool.o \ + fm10k_pf.o \ + fm10k_vf.o \ + fm10k_mbx.o \ + fm10k_iov.o \ + fm10k_tlv.o + +fm10k-$(CONFIG_DEBUG_FS) += fm10k_debugfs.o +fm10k-$(CONFIG_DCB) += fm10k_dcbnl.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 14440200499b..b34bb008b104 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -23,6 +23,7 @@ #include <linux/types.h> #include <linux/etherdevice.h> +#include <linux/cpumask.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/pci.h> @@ -33,7 +34,7 @@ #include "fm10k_pf.h" #include "fm10k_vf.h" -#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ +#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */ #define MAX_QUEUES FM10K_MAX_QUEUES_PF @@ -66,6 +67,7 @@ struct fm10k_l2_accel { enum fm10k_ring_state_t { __FM10K_TX_DETECT_HANG, __FM10K_HANG_CHECK_ARMED, + __FM10K_TX_XPS_INIT_DONE, }; #define check_for_tx_hang(ring) \ @@ -138,7 +140,7 @@ struct fm10k_ring { * different for DCB and RSS modes */ u8 qos_pc; /* priority class of queue */ - u16 vid; /* default vlan ID of queue */ + u16 vid; /* default VLAN ID of queue */ u16 count; /* amount of descriptors */ u16 next_to_alloc; @@ -164,14 +166,20 @@ struct fm10k_ring_container { unsigned int total_packets; /* total packets processed this int */ u16 work_limit; /* total work allowed per interrupt */ u16 itr; /* interrupt throttle rate value */ + u8 itr_scale; /* ITR adjustment based on PCI speed */ u8 count; /* total number of rings in vector */ }; #define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ #define FM10K_ITR_10K 100 /* 100us */ #define FM10K_ITR_20K 50 /* 50us */ +#define FM10K_ITR_40K 25 /* 25us */ #define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ +#define ITR_IS_ADAPTIVE(itr) (!!(itr & FM10K_ITR_ADAPTIVE)) + +#define FM10K_TX_ITR_DEFAULT FM10K_ITR_40K +#define FM10K_RX_ITR_DEFAULT FM10K_ITR_20K #define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) @@ -203,6 +211,7 @@ 
struct fm10k_q_vector { struct fm10k_ring_container rx, tx; struct napi_struct napi; + cpumask_t affinity_mask; char name[IFNAMSIZ + 9]; #ifdef CONFIG_DEBUG_FS @@ -413,7 +422,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) (&(((union fm10k_rx_desc *)((R)->desc))[i])) #define FM10K_MAX_TXD_PWR 14 -#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR) +#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) @@ -434,7 +443,7 @@ union fm10k_ftag_info { struct { /* dglort and sglort combined into a single 32bit desc read */ __le32 glort; - /* upper 16 bits of vlan are reserved 0 for swpri_type_user */ + /* upper 16 bits of VLAN are reserved 0 for swpri_type_user */ __le32 vlan; } d; struct { @@ -484,7 +493,7 @@ void fm10k_netpoll(struct net_device *netdev); #endif /* Netdev */ -struct net_device *fm10k_alloc_netdev(void); +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); int fm10k_setup_rx_resources(struct fm10k_ring *); int fm10k_setup_tx_resources(struct fm10k_ring *); void fm10k_free_rx_resources(struct fm10k_ring *); @@ -551,5 +560,9 @@ int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); /* DCB */ +#ifdef CONFIG_DCB void fm10k_dcbnl_set_ops(struct net_device *dev); +#else +static inline void fm10k_dcbnl_set_ops(struct net_device *dev) {} +#endif #endif /* _FM10K_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 5c7a4d7662d8..2be4361839db 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -20,7 +20,6 @@ #include "fm10k.h" -#ifdef CONFIG_DCB /** * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device * @dev: netdev interface for the device @@ -155,7 +154,6 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { .setdcbx = fm10k_dcbnl_setdcbx, }; -#endif /* CONFIG_DCB */ /** * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev * @dev: netdev interface for the device @@ -164,11 +162,9 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { **/ void fm10k_dcbnl_set_ops(struct net_device *dev) { -#ifdef CONFIG_DCB struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_hw *hw = &interface->hw; if (hw->mac.type == fm10k_mac_pf) dev->dcbnl_ops = &fm10k_dcbnl_ops; -#endif /* CONFIG_DCB */ } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5304bc1fbecd..5d6137faf7d1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -18,8 +18,6 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 */ -#ifdef CONFIG_DEBUG_FS - #include "fm10k.h" #include <linux/debugfs.h> @@ -258,5 +256,3 @@ void fm10k_dbg_exit(void) debugfs_remove_recursive(dbg_root); dbg_root = NULL; } - -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2ce0eba5e040..2f6a05b57228 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -111,12 +111,14 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_tx_busy", tx_busy), - FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped), + FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped), FM10K_MBX_STAT("mbx_tx_messages", tx_messages), FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords), + FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled), FM10K_MBX_STAT("mbx_rx_messages", rx_messages), FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords), FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err), + FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) @@ -125,7 +127,7 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) #define FM10K_QUEUE_STATS_LEN(_n) \ - ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) + ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ FM10K_NETDEV_STATS_LEN + \ @@ -257,7 +259,8 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) stats_len += FM10K_DEBUG_STATS_LEN; if (iov_data) - stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs; + stats_len += FM10K_MBX_STATS_LEN * + iov_data->num_vfs; } return stats_len; @@ -296,14 +299,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if (interface->flags & FM10K_FLAG_DEBUG_STATS) { for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset; + p = (char *)interface + + fm10k_gstrings_debug_stats[i].stat_offset; *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset; + p = (char *)&interface->hw.mbx + + fm10k_gstrings_mbx_stats[i].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } @@ -320,6 +325,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { for (i = 0; i < iov_data->num_vfs; i++) { struct fm10k_vf_info *vf_info; + vf_info = &iov_data->vf_info[i]; /* skip stats if we don't have a vf info */ @@ -329,7 +335,8 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, } for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset; + p = (char *)&vf_info->mbx + + fm10k_gstrings_mbx_stats[j].stat_offset; *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } @@ -699,12 +706,10 @@ static int fm10k_get_coalesce(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); - ec->use_adaptive_tx_coalesce = - !!(interface->tx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr); ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE; - ec->use_adaptive_rx_coalesce = - !!(interface->rx_itr & FM10K_ITR_ADAPTIVE); + ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr); ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE; return 0; @@ -729,10 +734,10 @@ static int fm10k_set_coalesce(struct net_device *dev, /* set initial values for adaptive ITR */ if (ec->use_adaptive_tx_coalesce) - tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K; + tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT; if (ec->use_adaptive_rx_coalesce) - rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* update interface */ interface->tx_itr = tx_itr; @@ -1020,7 +1025,6 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } - static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e76a44cf330c..b243c3cbe68f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.15.2-k" +#define DRV_VERSION "0.19.3-k" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = @@ -42,7 +42,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ -struct workqueue_struct *fm10k_workqueue = NULL; +struct workqueue_struct *fm10k_workqueue; /** * fm10k_init_module - Driver Registration Routine @@ -56,8 +56,7 @@ static int __init fm10k_init_module(void) pr_info("%s\n", fm10k_copyright); /* create driver workqueue */ - if (!fm10k_workqueue) - fm10k_workqueue = create_workqueue("fm10k"); + fm10k_workqueue = create_workqueue("fm10k"); fm10k_dbg_init(); @@ -80,7 +79,6 @@ static void __exit fm10k_exit_module(void) /* destroy driver workqueue */ flush_workqueue(fm10k_workqueue); destroy_workqueue(fm10k_workqueue); - fm10k_workqueue = NULL; } module_exit(fm10k_exit_module); @@ -917,7 +915,7 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) /* set timestamping bits */ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - desc_flags |= FM10K_TXD_FLAG_TIME; + desc_flags |= FM10K_TXD_FLAG_TIME; /* set checksum offload bits */ desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, @@ -1094,11 +1092,11 @@ dma_error: netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring) { + u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct fm10k_tx_buffer *first; - int tso; - u32 tx_flags = 0; unsigned short f; - u16 count = TXD_USE_COUNT(skb_headlen(skb)); + u32 tx_flags = 0; + int tso; /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, @@ -1363,10 +1361,10 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, **/ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) { - unsigned int avg_wire_size, packets; + unsigned int 
avg_wire_size, packets, itr_round; /* Only update ITR if we are using adaptive setting */ - if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) + if (!ITR_IS_ADAPTIVE(ring_container->itr)) goto clear_counts; packets = ring_container->total_packets; @@ -1375,18 +1373,44 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) avg_wire_size = ring_container->total_bytes / packets; - /* Add 24 bytes to size to account for CRC, preamble, and gap */ - avg_wire_size += 24; - - /* Don't starve jumbo frames */ - if (avg_wire_size > 3000) - avg_wire_size = 3000; + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (34 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 360) { + /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ + avg_wire_size *= 8; + avg_wire_size += 376; + } else if (avg_wire_size <= 1152) { + /* 77K ints/sec to 45K ints/sec */ + avg_wire_size *= 3; + avg_wire_size += 2176; + } else if (avg_wire_size <= 1920) { + /* 45K ints/sec to 38K ints/sec */ + avg_wire_size += 4480; + } else { + /* plateau at a limit of 38K ints/sec */ + avg_wire_size = 6656; + } - /* Give a little boost to mid-size frames */ - if ((avg_wire_size > 300) && (avg_wire_size < 1200)) - avg_wire_size /= 3; - else - avg_wire_size /= 2; + /* Perform final bitshift for division after rounding up to ensure + * that the calculation will never get below a 1. The bit shift + * accounts for changes in the ITR due to PCIe link speed. 
+ */ + itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; + avg_wire_size += (1 << itr_round) - 1; + avg_wire_size >>= itr_round; /* write back value and retain adaptive flag */ ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; @@ -1428,11 +1452,15 @@ static int fm10k_poll(struct napi_struct *napi, int budget) fm10k_for_each_ring(ring, q_vector->tx) clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + return budget; + /* attempt to distribute budget to each queue fairly, but don't * allow the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) - per_ring_budget = max(budget/q_vector->rx.count, 1); + per_ring_budget = max(budget / q_vector->rx.count, 1); else per_ring_budget = budget; @@ -1600,6 +1628,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, q_vector->tx.ring = ring; q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; q_vector->tx.itr = interface->tx_itr; + q_vector->tx.itr_scale = interface->hw.mac.itr_scale; q_vector->tx.count = txr_count; while (txr_count) { @@ -1628,6 +1657,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, /* save Rx ring container info */ q_vector->rx.ring = ring; q_vector->rx.itr = interface->rx_itr; + q_vector->rx.itr_scale = interface->hw.mac.itr_scale; q_vector->rx.count = rxr_count; while (rxr_count) { @@ -1966,8 +1996,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) /* Allocate memory for queues */ err = fm10k_alloc_q_vectors(interface); - if (err) + if (err) { + fm10k_reset_msix_capability(interface); return err; + } /* Map rings to devices, and map devices to physical queues */ fm10k_assign_rings(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index af09a1b272e6..98202c3d591c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -57,7 +57,7 @@ static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) } /** - * fm10k_fifo_empty - Test to verify if fifo is empty + * fm10k_fifo_empty - Test to verify if FIFO is empty * @fifo: pointer to FIFO * * This function returns true if the FIFO is empty, else false @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indices into the fifo based on head + offset + * This function returns the indices into the FIFO based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indices into the fifo based on tail + offset + * This function returns the indices into the FIFO based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -160,7 +160,7 @@ static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) /** * fm10k_mbx_tail_add - Determine new tail value with added offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. 
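The replacement moderation curve in fm10k_update_itr is easier to follow with concrete numbers. Taking the final shift as exactly 8, i.e. treating the PCIe adjustment itr_scale as 0 (the driver actually adds hw->mac.itr_scale, which depends on the negotiated link speed), and reading the result in the microsecond units used by the FM10K_ITR_* defines earlier in this patch:

    /* 60-byte average frames: first branch of the approximation */
    avg_wire_size = 60 * 8 + 376;           /* = 856 */
    itr = (856 + 255) >> 8;                 /* = 4  -> ~4us  -> ~250K ints/sec */

    /* 1518-byte average frames: third branch */
    avg_wire_size = 1518 + 4480;            /* = 5998 */
    itr = (5998 + 255) >> 8;                /* = 24 -> ~24us -> ~41K ints/sec */

Both results fall inside the per-branch rate ranges quoted in the new comments, and the value written back is OR'd with FM10K_ITR_ADAPTIVE so the adaptive flag survives the update.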
@@ -176,7 +176,7 @@ static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) /** * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset * @mbx: pointer to mailbox - * @offset: length to add to head offset + * @offset: length to add to tail offset * * This function takes the local tail index and recomputes it for * a given length added as an offset. @@ -240,7 +240,7 @@ static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) } /** - * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo + * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO * @fifo: pointer to FIFO * @msg: message array to populate * @tail_offset: additional offset to add to tail pointer @@ -336,6 +336,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) /** * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will take a section of the Tx FIFO and copy it into the @@ -375,6 +376,8 @@ static void fm10k_mbx_write_copy(struct fm10k_hw *hw, if (!tail) tail++; + mbx->tx_mbmem_pulled++; + /* write message to hardware FIFO */ fm10k_write_reg(hw, mbmem + tail++, *(head++)); } while (--len && --end); @@ -459,6 +462,8 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, if (!head) head++; + mbx->rx_mbmem_pushed++; + /* read message from hardware FIFO */ *(tail++) = fm10k_read_reg(hw, mbmem + head++); } while (--len && --end); @@ -707,7 +712,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function dequeues messages and hands them off to the tlv parser. + * This function dequeues messages and hands them off to the TLV parser. * It will return the number of messages processed when called. **/ static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, @@ -899,7 +904,7 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr * @mbx: pointer to mailbox * * This function creates a fake disconnect header for loading into remote @@ -920,7 +925,7 @@ static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_create_error_msg - Generate a error message + * fm10k_mbx_create_error_msg - Generate an error message * @mbx: pointer to mailbox * @err: local error encountered * @@ -953,7 +958,6 @@ static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) /** * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header * @mbx: pointer to mailbox - * @msg: message array to read * * This function will parse up the fields in the mailbox header and return * an error if the header contains any of a number of invalid configurations @@ -1017,11 +1021,12 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. 
**/ @@ -1147,8 +1152,8 @@ static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) /** * fm10k_mbx_process_connect - Process connect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox - * @msg: message array to process * * This function will read an incoming connect header and reply with the * appropriate message. It will return a value indicating the number of @@ -1194,6 +1199,7 @@ static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, /** * fm10k_mbx_process_data - Process data header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming data header and reply with the @@ -1235,6 +1241,7 @@ static s32 fm10k_mbx_process_data(struct fm10k_hw *hw, /** * fm10k_mbx_process_disconnect - Process disconnect header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming disconnect header and reply with the @@ -1287,6 +1294,7 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, /** * fm10k_mbx_process_error - Process error header + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * * This function will read an incoming error header and reply with the @@ -1556,7 +1564,7 @@ static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes * * This function initializes the mailbox for use. It will split the - * buffer provided an use that th populate both the Tx and Rx FIFO by + * buffer provided and use that to populate both the Tx and Rx FIFO by * evenly splitting it. In order to allow for easy masking of head/tail * the value reported in size must be a power of 2 and is reported in * DWORDs, not bytes. Any invalid values will cause the mailbox to return @@ -1633,7 +1641,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO * @mbx: pointer to mailbox * - * This function returns a connection mailbox header + * This function returns a data mailbox header **/ static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) { @@ -1726,8 +1734,6 @@ static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) fm10k_sm_mbx_create_connect_hdr(mbx, 0); fm10k_mbx_write(hw, mbx); - /* enable interrupt and notify other party of new message */ - return 0; } @@ -1771,7 +1777,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, } /** - * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header * @mbx: pointer to mailbox * * This function will parse up the fields in the mailbox header and return @@ -1849,7 +1855,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) } /** - * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr + * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header * @mbx: pointer to mailbox * @err: local error encountered * @@ -1879,6 +1885,7 @@ static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @tail: tail index of message * * This function will dequeue one message from the Rx switch manager mailbox * FIFO and place it in the Rx mailbox FIFO for processing by software. 
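The corrected fm10k_pfvf_mbx_init kernel-doc relies on a standard ring-buffer trick: when the FIFO size is a power of two, head and tail counters can advance freely and be reduced to an array slot with a mask instead of a modulo. A minimal sketch of that indexing, using kernel integer types but a generic name rather than the driver's fm10k_fifo helpers:

    #include <linux/types.h>

    /* map a free-running counter onto a power-of-two sized FIFO */
    static inline u16 fifo_slot(u16 counter, u16 size)
    {
            return counter & (size - 1);    /* valid only when size is 2^n */
    }

With a 64-DWORD FIFO, for instance, a head counter of 67 lands in slot 3 without any division, which is why the documentation insists the reported size be a power of 2 and treats anything else as an error.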
@@ -1918,6 +1925,7 @@ static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO * @hw: pointer to hardware structure * @mbx: pointer to mailbox + * @head: head index of message * * This function will dequeue one message from the Tx mailbox FIFO and place * it in the Tx switch manager mailbox FIFO for processing by hardware. @@ -1957,11 +1965,12 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /** * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @head: acknowledgement number * * This function will generate an outgoing message based on the current - * mailbox state and the remote fifo head. It will return the length + * mailbox state and the remote FIFO head. It will return the length * of the outgoing message excluding header on success, and a negative value * on error. **/ @@ -2073,7 +2082,7 @@ send_reply: } /** - * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt + * fm10k_sm_mbx_process - Process switch manager mailbox interrupt * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @@ -2129,13 +2138,19 @@ fifo_err: * @mbx: pointer to mailbox * @msg_data: handlers for mailbox events * - * This function for now is used to stub out the PF/SM mailbox + * This function initializes the PF/SM mailbox for use. It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. **/ s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; mbx->mbmem_reg = FM10K_MBMEM_PF(0); + /* start out in closed state */ mbx->state = FM10K_STATE_CLOSED; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 0419a7f0035e..245a0a3dc32e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -128,11 +128,11 @@ enum fm10k_mbx_state { * The maximum message size is provided during connect to avoid * jamming the mailbox with messages that do not fit. * Err_no: Error number - Applies only to error headers - * The error number provides a indication of the type of error + * The error number provides an indication of the type of error * experienced. 
*/ -/* macros for retriving and setting header values */ +/* macros for retrieving and setting header values */ #define FM10K_MSG_HDR_MASK(name) \ ((0x1u << FM10K_MSG_##name##_SIZE) - 1) #define FM10K_MSG_HDR_FIELD_SET(value, name) \ @@ -291,8 +291,10 @@ struct fm10k_mbx_info { u64 tx_dropped; u64 tx_messages; u64 tx_dwords; + u64 tx_mbmem_pulled; u64 rx_messages; u64 rx_dwords; + u64 rx_mbmem_pushed; u64 rx_parse_err; /* Buffer to store messages */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 7781e80896a6..662569d5b7c0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,7 +20,7 @@ #include "fm10k.h" #include <linux/vmalloc.h> -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN #include <net/vxlan.h> #endif /* CONFIG_FM10K_VXLAN */ @@ -556,11 +556,11 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; -#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#ifdef CONFIG_FM10K_VXLAN /* update VXLAN port configuration */ vxlan_get_rx_port(netdev); - #endif + fm10k_up(interface); return 0; @@ -608,7 +608,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) unsigned int r_idx = skb->queue_mapping; int err; - if ((skb->protocol == htons(ETH_P_8021Q)) && + if ((skb->protocol == htons(ETH_P_8021Q)) && !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame * are considered 2nd level or "outer" tags @@ -632,7 +632,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /* locate vlan header */ + /* locate VLAN header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); /* pull the 2 key pieces of data out of it */ @@ -705,7 +705,7 @@ static void fm10k_tx_timeout(struct net_device *netdev) } else { netif_info(interface, drv, netdev, "Fake Tx hang detected with timeout of %d seconds\n", - netdev->watchdog_timeo/HZ); + netdev->watchdog_timeo / HZ); /* fake Tx hang - increase the kernel timeout */ if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) @@ -778,7 +778,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (!set) clear_bit(vid, interface->active_vlans); - /* disable the default VID on ring if we have an active VLAN */ + /* disable the default VLAN ID on ring if we have an active VLAN */ for (i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = interface->rx_ring[i]; u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); @@ -789,7 +789,9 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) rx_ring->vid &= ~FM10K_VLAN_CLEAR; } - /* Do not remove default VID related entries from VLAN and MAC tables */ + /* Do not remove default VLAN ID related entries from VLAN and MAC + * tables + */ if (!set && vid == hw->mac.default_vid) return 0; @@ -814,7 +816,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (err) goto err_out; - /* set vid prior to syncing/unsyncing the VLAN */ + /* set VLAN ID prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? 
VLAN_N_VID : 0); /* Update the unicast and multicast address list to add/drop VLAN */ @@ -1151,6 +1153,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, int fm10k_setup_tc(struct net_device *dev, u8 tc) { struct fm10k_intfc *interface = netdev_priv(dev); + int err; /* Currently only the PF supports priority classes */ if (tc && (interface->hw.mac.type != fm10k_mac_pf)) @@ -1175,17 +1178,30 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc) netdev_reset_tc(dev); netdev_set_num_tc(dev, tc); - fm10k_init_queueing_scheme(interface); + err = fm10k_init_queueing_scheme(interface); + if (err) + goto err_queueing_scheme; - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; - if (netif_running(dev)) - fm10k_open(dev); + err = netif_running(dev) ? fm10k_open(dev) : 0; + if (err) + goto err_open; /* flag to indicate SWPRI has yet to be updated */ interface->flags |= FM10K_FLAG_SWPRI_CONFIG; return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + netif_device_detach(dev); + + return err; } static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -1355,7 +1371,7 @@ static netdev_features_t fm10k_features_check(struct sk_buff *skb, if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) return features; - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } static const struct net_device_ops fm10k_netdev_ops = { @@ -1388,8 +1404,9 @@ static const struct net_device_ops fm10k_netdev_ops = { #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -struct net_device *fm10k_alloc_netdev(void) +struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) { + netdev_features_t hw_features; struct fm10k_intfc *interface; struct net_device *dev; @@ -1412,27 +1429,31 @@ struct net_device *fm10k_alloc_netdev(void) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXHASH | NETIF_F_RXCSUM; + /* Only the PF can support VXLAN and NVGRE tunnel offloads */ + if (info->mac == fm10k_mac_pf) { + dev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + /* all features defined to this point should be changeable */ - dev->hw_features |= dev->features; + hw_features = dev->features; /* allow user to enable L2 forwarding acceleration */ - dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; /* configure VLAN features */ dev->vlan_features |= dev->features; - /* configure tunnel offloads */ - dev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_IPV6_CSUM; - /* we want to leave these both on as we cannot disable VLAN tag * insertion or stripping on the hardware since it is contained * in the FTAG and not in the frame itself. 
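The reworked fm10k_setup_tc above (like fm10k_reinit in fm10k_pci.c further down) now uses the usual kernel unwind ladder: each setup step that can fail jumps to a label that releases only what the earlier steps acquired, in reverse order. Schematically, with generic names standing in for the driver's functions:

    err = setup_a();                /* e.g. init the queueing scheme */
    if (err)
            goto err_a;

    err = setup_b();                /* e.g. request the mailbox IRQ */
    if (err)
            goto err_b;

    return 0;

    err_b:
            teardown_a();           /* undo setup_a() only */
    err_a:
            return err;

The failing step is expected to clean up after itself, so its label begins by undoing the previous step rather than the failed one.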
@@ -1443,5 +1464,7 @@ struct net_device *fm10k_alloc_netdev(void) dev->priv_flags |= IFF_UNICAST_FLT; + dev->hw_features |= hw_features; + return dev; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 74be792f3f1b..4eb7a6fa6b0d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -159,16 +159,40 @@ static void fm10k_reinit(struct fm10k_intfc *interface) fm10k_mbx_free_irq(interface); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + /* delay any future reset requests */ interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); - if (err) + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err); + goto reinit_err; + } + + err = hw->mac.ops.init_hw(hw); + if (err) { dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); + goto reinit_err; + } + + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + goto reinit_err; + } /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + + err = fm10k_hw_ready(interface); + if (err) + goto err_open; /* update hardware address for VFs if perm_addr has changed */ if (hw->mac.type == fm10k_mac_vf) { @@ -188,14 +212,27 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* reset clock */ fm10k_ts_reset(interface); - if (netif_running(netdev)) - fm10k_open(netdev); + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; fm10k_iov_resume(interface->pdev); rtnl_unlock(); clear_bit(__FM10K_RESETTING, &interface->state); + + return; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +reinit_err: + netif_device_detach(netdev); + + rtnl_unlock(); + + clear_bit(__FM10K_RESETTING, &interface->state); } static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@ -563,7 +600,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; @@ -579,6 +616,13 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), FM10K_PFVTCTL_FTAG_DESC_ENABLE); + /* Initialize XPS */ + if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) && + ring->q_vector) + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + /* enable queue */ fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); } @@ -669,7 +713,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* store tail pointer */ ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; - /* reset ntu and ntc to place SW in sync with hardwdare */ + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; ring->next_to_alloc = 0; @@ -694,7 +738,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* assign default VLAN to queue */ ring->vid = hw->mac.default_vid; - /* if we have an active VLAN, disable default VID */ + /* if we have an active 
VLAN, disable default VLAN ID */ if (test_bit(hw->mac.default_vid, interface->active_vlans)) ring->vid |= FM10K_VLAN_CLEAR; @@ -846,7 +890,7 @@ static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) struct fm10k_q_vector *q_vector = data; if (q_vector->rx.count || q_vector->tx.count) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -859,7 +903,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); /* service upstream mailbox */ if (fm10k_mbx_trylock(interface)) { @@ -867,7 +912,7 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) fm10k_mbx_unlock(interface); } - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; fm10k_service_event_schedule(interface); return IRQ_HANDLED; @@ -897,7 +942,7 @@ void fm10k_netpoll(struct net_device *netdev) #endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, - struct fm10k_fault *fault) + struct fm10k_fault *fault) { struct pci_dev *pdev = interface->pdev; struct fm10k_hw *hw = &interface->hw; @@ -1083,14 +1128,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) } /* we should validate host state after interrupt event */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; /* validate host state, and handle VF mailboxes in the service task */ fm10k_service_event_schedule(interface); /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> + hw->mac.itr_scale)); return IRQ_HANDLED; } @@ -1101,6 +1147,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; int itr_reg; + /* no mailbox IRQ to free if MSI-X is not enabled */ + if (!interface->msix_entries) + return; + /* disconnect the mailbox */ hw->mbx.ops.disconnect(hw, &hw->mbx); @@ -1141,7 +1191,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, /* MAC was changed so we need reset */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr)) interface->flags |= FM10K_FLAG_RESET_REQUESTED; /* VLAN override was changed, or default VLAN changed */ @@ -1269,7 +1319,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1388,14 +1438,14 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) } /* Enable interrupts w/ no moderation for "other" interrupts */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr); - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr); + 
fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr); /* Enable interrupts w/ moderation for mailbox */ - fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr); /* Enable individual interrupt causes */ fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | @@ -1423,10 +1473,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) err = fm10k_mbx_request_irq_pf(interface); else err = fm10k_mbx_request_irq_vf(interface); + if (err) + return err; /* connect mailbox */ - if (!err) - err = hw->mbx.ops.connect(hw, &hw->mbx); + err = hw->mbx.ops.connect(hw, &hw->mbx); + + /* if the mailbox failed to connect, then free IRQ */ + if (err) + fm10k_mbx_free_irq(interface); return err; } @@ -1455,8 +1510,10 @@ void fm10k_qv_free_irq(struct fm10k_intfc *interface) if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1514,6 +1571,9 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) goto err_out; } + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + /* Enable q_vector */ writel(FM10K_ITR_ENABLE, q_vector->itr); @@ -1534,8 +1594,10 @@ err_out: if (!q_vector->tx.count && !q_vector->rx.count) continue; - /* disable interrupts */ + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + /* disable interrupts */ writel(FM10K_ITR_MASK_SET, q_vector->itr); free_irq(entry->vector, q_vector); @@ -1573,7 +1635,7 @@ void fm10k_up(struct fm10k_intfc *interface) netif_tx_start_all_queues(interface->netdev); /* kick off the service timer now */ - hw->mac.get_host_state = 1; + hw->mac.get_host_state = true; mod_timer(&interface->service_timer, jiffies); } @@ -1684,7 +1746,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->last_reset = jiffies + (10 * HZ); /* reset and initialize the hardware so it is in a known state */ - err = hw->mac.ops.reset_hw(hw) ? 
: hw->mac.ops.init_hw(hw); + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&pdev->dev, "reset_hw failed: %d\n", err); + return err; + } + + err = hw->mac.ops.init_hw(hw); if (err) { dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; @@ -1722,13 +1790,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, pci_resource_len(pdev, 4)); hw->sw_addr = interface->sw_addr; - /* Only the PF can support VXLAN and NVGRE offloads */ - if (hw->mac.type != fm10k_mac_pf) { - netdev->hw_enc_features = 0; - netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; - } - /* initialize DCBNL interface */ fm10k_dcbnl_set_ops(netdev); @@ -1749,8 +1810,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->rx_ring_count = FM10K_DEFAULT_RXD; /* set default interrupt moderation */ - interface->tx_itr = FM10K_ITR_10K; - interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + interface->tx_itr = FM10K_TX_ITR_DEFAULT; + interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); @@ -1835,17 +1896,18 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) return; } - if (max_gts < expected_gts) { - dev_warn(&interface->pdev->dev, - "This device requires %dGT/s of bandwidth for optimal performance.\n", - expected_gts); - dev_warn(&interface->pdev->dev, - "A %sslot with x%d lanes is suggested.\n", - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : - hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), - hw->bus_caps.width); - } + if (max_gts >= expected_gts) + return; + + dev_warn(&interface->pdev->dev, + "This device requires %dGT/s of bandwidth for optimal performance.\n", + expected_gts); + dev_warn(&interface->pdev->dev, + "A %sslot with x%d lanes is suggested.\n", + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : + hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), + hw->bus_caps.width); } /** @@ -1859,8 +1921,7 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface) * The OS initialization, configuring of the interface private structure, * and a hardware reset occur. 
**/ -static int fm10k_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct fm10k_intfc *interface; @@ -1894,7 +1955,7 @@ static int fm10k_probe(struct pci_dev *pdev, pci_set_master(pdev); pci_save_state(pdev); - netdev = fm10k_alloc_netdev(); + netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]); if (!netdev) { err = -ENOMEM; goto err_alloc_netdev; @@ -2071,8 +2132,10 @@ static int fm10k_resume(struct pci_dev *pdev) /* reset hardware to known state */ err = hw->mac.ops.init_hw(&interface->hw); - if (err) + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); return err; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); @@ -2083,16 +2146,22 @@ static int fm10k_resume(struct pci_dev *pdev) rtnl_lock(); err = fm10k_init_queueing_scheme(interface); - if (!err) { - fm10k_mbx_request_irq(interface); - if (netif_running(netdev)) - err = fm10k_open(netdev); - } + if (err) + goto err_queueing_scheme; - rtnl_unlock(); + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_irq; + err = fm10k_hw_ready(interface); if (err) - return err; + goto err_open; + + err = netif_running(netdev) ? fm10k_open(netdev) : 0; + if (err) + goto err_open; + + rtnl_unlock(); /* assume host is not ready, to prevent race with watchdog in case we * actually don't have connection to the switch @@ -2110,6 +2179,14 @@ static int fm10k_resume(struct pci_dev *pdev) netif_device_attach(netdev); return 0; +err_open: + fm10k_mbx_free_irq(interface); +err_mbx_irq: + fm10k_clear_queueing_scheme(interface); +err_queueing_scheme: + rtnl_unlock(); + + return err; } /** @@ -2185,6 +2262,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) fm10k_close(netdev); + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + fm10k_mbx_free_irq(interface); pci_disable_device(pdev); @@ -2248,11 +2328,22 @@ static void fm10k_io_resume(struct pci_dev *pdev) int err = 0; /* reset hardware to known state */ - hw->mac.ops.init_hw(&interface->hw); + err = hw->mac.ops.init_hw(&interface->hw); + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); + return; + } /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + err = fm10k_init_queueing_scheme(interface); + if (err) { + dev_err(&interface->pdev->dev, + "init_queueing_scheme failed: %d\n", err); + return; + } + /* reassociate interrupts */ fm10k_mbx_request_irq(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 8c0bdc4e4edd..62ccebc5f728 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -150,19 +150,26 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) FM10K_TPH_RXCTRL_HDR_WROEN); } - /* set max hold interval to align with 1.024 usec in all modes */ + /* set max hold interval to align with 1.024 usec in all modes and + * store ITR scale + */ switch (hw->bus.speed) { case fm10k_bus_speed_2500: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; break; case fm10k_bus_speed_5000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; break; case fm10k_bus_speed_8000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; default: dma_ctrl = 0; + /* just in case, assume Gen3 ITR scale */ + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; } @@ -259,7 +266,6 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) { u8 perm_addr[ETH_ALEN]; u32 serial_num; - int i; serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); @@ -281,10 +287,8 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) perm_addr[4] = (u8)(serial_num >> 8); perm_addr[5] = (u8)(serial_num); - for (i = 0; i < ETH_ALEN; i++) { - hw->mac.perm_addr[i] = perm_addr[i]; - hw->mac.addr[i] = perm_addr[i]; - } + ether_addr_copy(hw->mac.perm_addr, perm_addr); + ether_addr_copy(hw->mac.addr, perm_addr); return 0; } @@ -325,7 +329,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, /* clear set bit from VLAN ID */ vid &= ~FM10K_VLAN_CLEAR; - /* if glort or vlan are not valid return error */ + /* if glort or VLAN are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -334,8 +338,8 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, ((u32)mac[3] << 16) | ((u32)mac[4] << 8) | ((u32)mac[5])); - mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) | - ((u32)mac[1])); + mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) | + ((u16)mac[1])); mac_update.vlan = cpu_to_le16(vid); mac_update.glort = cpu_to_le16(glort); mac_update.action = add ? 0 : 1; @@ -410,6 +414,7 @@ static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; @@ -903,6 +908,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); + /* Provide the VF the ITR scale, using software-defined fields in TDLEN + * to pass the information during VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. 
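The hand-off described in the comment above reduces to a single register write on the PF side (the statement that immediately follows this comment) and a single masked read on the VF side (added to fm10k_init_hw_vf later in this patch). A minimal sketch showing the two halves together, purely illustrative; both fragments appear verbatim in the hunks below:

    /* PF: stash the scale in the VF's not-yet-used TDLEN register.
     * Bits 11:9 (FM10K_TDLEN_ITR_SCALE_MASK) comfortably hold the
     * Gen1/Gen2/Gen3 scale values of 2, 1 and 0.
     */
    fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx),
                    hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT);

    /* VF: recover the scale during init_hw, before TDLEN is needed for
     * its real purpose (Tx descriptor ring length).
     */
    hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
                         FM10K_TDLEN_ITR_SCALE_MASK) >>
                        FM10K_TDLEN_ITR_SCALE_SHIFT;
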
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + err_out: /* configure Queue control register */ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & @@ -910,7 +922,7 @@ err_out: txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; - /* assign VID */ + /* assign VLAN ID */ for (i = 0; i < queues_per_pool; i++) fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); @@ -1035,6 +1047,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an + * explanation of how TDLEN is used. + */ + fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i), + hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); } @@ -1155,14 +1173,14 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, } /** - * fm10k_iov_select_vid - Select correct default VID + * fm10k_iov_select_vid - Select correct default VLAN ID * @hw: Pointer to hardware structure - * @vid: VID to correct + * @vid: VLAN ID to correct * - * Will report an error if VID is out of range. For VID = 0, it will return - * either the pf_vid or sw_vid depending on which one is set. + * Will report an error if the VLAN ID is out of range. For VID = 0, it will + * return either the pf_vid or sw_vid depending on which one is set. */ -static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) { if (!vid) return vf_info->pf_vid ? 
vf_info->pf_vid : vf_info->sw_vid; @@ -1212,11 +1230,11 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, set = !(vid & FM10K_VLAN_CLEAR); vid &= ~FM10K_VLAN_CLEAR; - err = fm10k_iov_select_vid(vf_info, vid); + err = fm10k_iov_select_vid(vf_info, (u16)vid); if (err < 0) return err; - else - vid = err; + + vid = err; /* update VSI info for VF in regards to VLAN table */ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); @@ -1232,7 +1250,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, /* block attempts to set MAC for a locked device */ if (is_valid_ether_addr(vf_info->mac) && - memcmp(mac, vf_info->mac, ETH_ALEN)) + !ether_addr_equal(mac, vf_info->mac)) return FM10K_ERR_PARAM; set = !(vlan & FM10K_VLAN_CLEAR); @@ -1241,8 +1259,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new unicast address */ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, @@ -1267,8 +1285,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; - else - vlan = err; + + vlan = (u16)err; /* notify switch of request for new multicast address */ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, @@ -1396,14 +1414,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return err; } -const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { - FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), - FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), - FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), - FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), - FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), -}; - /** * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure @@ -1431,9 +1441,10 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, &stats->vlan_drop); - loopback_drop = fm10k_read_hw_stats_32b(hw, - FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + loopback_drop = + fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1678,8 +1689,8 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * * This handler configures the default VLAN for the PF **/ -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1698,7 +1709,7 @@ s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; - /* verify VID is valid */ + /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -1855,39 +1866,39 @@ static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_pf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_pf, - .init_hw = &fm10k_init_hw_pf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_generic, - .update_vlan = &fm10k_update_vlan_pf, - .read_mac_addr = 
&fm10k_read_mac_addr_pf, - .update_uc_addr = &fm10k_update_uc_addr_pf, - .update_mc_addr = &fm10k_update_mc_addr_pf, - .update_xcast_mode = &fm10k_update_xcast_mode_pf, - .update_int_moderator = &fm10k_update_int_moderator_pf, - .update_lport_state = &fm10k_update_lport_state_pf, - .update_hw_stats = &fm10k_update_hw_stats_pf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_pf, - .configure_dglort_map = &fm10k_configure_dglort_map_pf, - .set_dma_mask = &fm10k_set_dma_mask_pf, - .get_fault = &fm10k_get_fault_pf, - .get_host_state = &fm10k_get_host_state_pf, - .adjust_systime = &fm10k_adjust_systime_pf, - .read_systime = &fm10k_read_systime_pf, +static const struct fm10k_mac_ops mac_ops_pf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_pf, + .init_hw = fm10k_init_hw_pf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_generic, + .update_vlan = fm10k_update_vlan_pf, + .read_mac_addr = fm10k_read_mac_addr_pf, + .update_uc_addr = fm10k_update_uc_addr_pf, + .update_mc_addr = fm10k_update_mc_addr_pf, + .update_xcast_mode = fm10k_update_xcast_mode_pf, + .update_int_moderator = fm10k_update_int_moderator_pf, + .update_lport_state = fm10k_update_lport_state_pf, + .update_hw_stats = fm10k_update_hw_stats_pf, + .rebind_hw_stats = fm10k_rebind_hw_stats_pf, + .configure_dglort_map = fm10k_configure_dglort_map_pf, + .set_dma_mask = fm10k_set_dma_mask_pf, + .get_fault = fm10k_get_fault_pf, + .get_host_state = fm10k_get_host_state_pf, + .adjust_systime = fm10k_adjust_systime_pf, + .read_systime = fm10k_read_systime_pf, }; -static struct fm10k_iov_ops iov_ops_pf = { - .assign_resources = &fm10k_iov_assign_resources_pf, - .configure_tc = &fm10k_iov_configure_tc_pf, - .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf, +static const struct fm10k_iov_ops iov_ops_pf = { + .assign_resources = fm10k_iov_assign_resources_pf, + .configure_tc = fm10k_iov_configure_tc_pf, + .assign_int_moderator = fm10k_iov_assign_int_moderator_pf, .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, - .reset_resources = &fm10k_iov_reset_resources_pf, - .set_lport = &fm10k_iov_set_lport_pf, - .reset_lport = &fm10k_iov_reset_lport_pf, - .update_stats = &fm10k_iov_update_stats_pf, - .report_timestamp = &fm10k_iov_report_timestamp_pf, + .reset_resources = fm10k_iov_reset_resources_pf, + .set_lport = fm10k_iov_set_lport_pf, + .reset_lport = fm10k_iov_reset_lport_pf, + .update_stats = fm10k_iov_update_stats_pf, + .report_timestamp = fm10k_iov_report_timestamp_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) @@ -1897,9 +1908,9 @@ static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); } -struct fm10k_info fm10k_pf_info = { +const struct fm10k_info fm10k_pf_info = { .mac = fm10k_mac_pf, - .get_invariants = &fm10k_get_invariants_pf, + .get_invariants = fm10k_get_invariants_pf, .mac_ops = &mac_ops_pf, .iov_ops = &iov_ops_pf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 40a0dbc62a04..b2d96b45ca3c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -74,6 +74,11 @@ enum fm10k_pf_tlv_attr_id_v1 { #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 +/* The following data structures are overlayed directly onto TLV mailbox + * messages, and must not break 4 byte alignment. Ensure the structures line + * up correctly as per their TLV definition. + */ + struct fm10k_mac_update { __le32 mac_lower; __le16 mac_upper; @@ -81,34 +86,32 @@ struct fm10k_mac_update { __le16 glort; u8 flags; u8 action; -} __packed; +} __aligned(4) __packed; struct fm10k_global_table_data { __le32 used; __le32 avail; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_error { __le32 status; struct fm10k_global_table_data mac; struct fm10k_global_table_data nexthop; struct fm10k_global_table_data ffu; -} __packed; +} __aligned(4) __packed; struct fm10k_swapi_1588_timestamp { __le64 egress; __le64 ingress; __le16 dglort; __le16 sglort; -} __packed; +} __aligned(4) __packed; s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ fm10k_lport_map_msg_attr, func) -s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **, - struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; #define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ @@ -129,7 +132,6 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; -extern struct fm10k_info fm10k_pf_info; +extern const struct fm10k_info fm10k_pf_info; #endif /* _FM10K_PF_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 9b29d7b0377a..ab01bb30752f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,8 +48,8 @@ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) * the attribute buffer. It will return success if provided with a valid * pointers. **/ -s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, - const unsigned char *string) +static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) { u32 attr_data = 0, len = 0; u32 *attr; @@ -98,7 +98,7 @@ s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, * it in the array pointed by by string. It will return success if provided * with a valid pointers. **/ -s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) { u32 len; @@ -353,7 +353,7 @@ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) * function will return NULL on failure, and a pointer to the start * of the nested attributes on success. 
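Regarding the alignment note added above in fm10k_pf.h: the structures tagged __aligned(4) __packed are overlaid directly onto TLV mailbox messages, so each one has to occupy a whole number of 32-bit words. A hypothetical compile-time check in plain C, shown only to make the constraint concrete; the patch itself relies solely on the annotations:

    #include "fm10k_pf.h"   /* overlay struct definitions shown above */

    /* Each overlay must be a multiple of 4 bytes so that whatever
     * follows it in the TLV message stays 32-bit aligned.
     */
    _Static_assert(sizeof(struct fm10k_mac_update) % 4 == 0,
                   "fm10k_mac_update breaks TLV alignment");
    _Static_assert(sizeof(struct fm10k_swapi_error) % 4 == 0,
                   "fm10k_swapi_error breaks TLV alignment");
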
**/ -u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) { u32 *attr; @@ -370,7 +370,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) } /** - * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes * @msg: Pointer to message block * * This function closes off an existing set of nested attributes. The @@ -378,7 +378,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) * the case of a nest within the nest this would be the outer nest pointer. * This function will return success provided all pointers are valid. **/ -s32 fm10k_tlv_attr_nest_stop(u32 *msg) +static s32 fm10k_tlv_attr_nest_stop(u32 *msg) { u32 *attr; u32 len; @@ -483,8 +483,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array * and 0 on success. **/ -s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, - const struct fm10k_tlv_attr *tlv_attr) +static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; s32 err = 0; @@ -755,7 +755,7 @@ parse_nested: err = fm10k_tlv_attr_get_mac_vlan( results[FM10K_TEST_MSG_MAC_ADDR], result_mac, &result_vlan); - if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + if (!err && !ether_addr_equal(test_mac, result_mac)) err = FM10K_ERR_INVALID_VALUE; if (!err && test_vlan != result_vlan) err = FM10K_ERR_INVALID_VALUE; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 7e045e8bf1eb..e1845e0a17d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,9 +38,9 @@ struct fm10k_msg_data; * mailbox size we will provide a message with the above header and it * will be segmented and transported to the mailbox to the other side where * it is reassembled. 
It contains the following fields: - * Len: Length of the message in bytes excluding the message header + * Length: Length of the message in bytes excluding the message header * Flags: TBD - * Rule: These will be the message/argument types we pass + * Type/ID: These will be the message/argument types we pass */ /* message data header */ #define FM10K_TLV_ID_SHIFT 0 @@ -106,8 +106,6 @@ struct fm10k_msg_data { #define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } s32 fm10k_tlv_msg_init(u32 *, u16); -s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *); -s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *); s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); s32 fm10k_tlv_attr_put_bool(u32 *, u16); @@ -147,9 +145,6 @@ s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); -u32 *fm10k_tlv_attr_nest_start(u32 *, u16); -s32 fm10k_tlv_attr_nest_stop(u32 *); -s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *); s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, const struct fm10k_msg_data *); s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 318a212f0a78..854ebb1906bf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -77,6 +77,7 @@ struct fm10k_hw; #define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 #define FM10K_ERR_PARAM -2 +#define FM10K_ERR_NO_RESOURCES -3 #define FM10K_ERR_REQUESTS_PENDING -4 #define FM10K_ERR_RESET_REQUESTED -5 #define FM10K_ERR_DMA_PENDING -6 @@ -271,6 +272,20 @@ struct fm10k_hw; #define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) #define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) #define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR) + * scale which is based on the PCIe speed but the speed information in the PCI + * configuration space may not be accurate. The PF already knows the ITR scale + * but there is no defined method to pass that information from the PF to the + * VF. This is accomplished during VF initialization by temporarily co-opting + * the yet-to-be-used TDLEN register to have the PF store the ITR shift for + * the VF to retrieve before the VF needs to use the TDLEN register for its + * intended purpose, i.e. before the Tx resources are allocated. 
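The scale that the VF recovers this way is the same value the earlier mailbox hunks use to shrink interrupt-delay constants (FM10K_MBX_INT_DELAY in fm10k_msix_mbx_pf and fm10k_msix_mbx_vf). The apparent intent, inferred here rather than quoted from a datasheet, is that the ITR tick period grows on slower PCIe links, so the Gen3-based count is shifted down to keep roughly the same 20us delay. Illustrative fragment only:

    /* FM10K_TDLEN_ITR_SCALE_GEN3 == 0, _GEN2 == 1, _GEN1 == 2 (defined
     * just below), so the programmed count halves for each step down in
     * link speed while the wall-clock delay stays about the same.
     */
    fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
                    FM10K_ITR_ENABLE |
                    (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale));
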
+ */ +#define FM10K_TDLEN_ITR_SCALE_SHIFT 9 +#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00 +#define FM10K_TDLEN_ITR_SCALE_GEN1 2 +#define FM10K_TDLEN_ITR_SCALE_GEN2 1 +#define FM10K_TDLEN_ITR_SCALE_GEN3 0 #define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) #define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 #define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 @@ -339,7 +354,7 @@ struct fm10k_hw; #define FM10K_VLAN_TABLE_VID_MAX 4096 #define FM10K_VLAN_TABLE_VSI_MAX 64 #define FM10K_VLAN_LENGTH_SHIFT 16 -#define FM10K_VLAN_CLEAR (1 << 15) +#define FM10K_VLAN_CLEAR BIT(15) #define FM10K_VLAN_ALL \ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) @@ -373,13 +388,13 @@ struct fm10k_hw; #define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) enum fm10k_int_source { - fm10k_int_Mailbox = 0, - fm10k_int_PCIeFault = 1, - fm10k_int_SwitchUpDown = 2, - fm10k_int_SwitchEvent = 3, - fm10k_int_SRAM = 4, - fm10k_int_VFLR = 5, - fm10k_int_MaxHoldTime = 6, + fm10k_int_mailbox = 0, + fm10k_int_pcie_fault = 1, + fm10k_int_switch_up_down = 2, + fm10k_int_switch_event = 3, + fm10k_int_sram = 4, + fm10k_int_vflr = 5, + fm10k_int_max_hold_time = 6, fm10k_int_sources_max_pf }; @@ -535,7 +550,6 @@ struct fm10k_mac_ops { struct fm10k_dglort_cfg *); void (*set_dma_mask)(struct fm10k_hw *, u64); s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); - void (*request_lport_map)(struct fm10k_hw *); s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); u64 (*read_systime)(struct fm10k_hw *); }; @@ -559,6 +573,7 @@ struct fm10k_mac_info { bool get_host_state; bool tx_ready; u32 dglort_map; + u8 itr_scale; }; struct fm10k_swapi_table_info { @@ -644,10 +659,10 @@ enum fm10k_devices { }; struct fm10k_info { - enum fm10k_mac_type mac; - s32 (*get_invariants)(struct fm10k_hw *); - struct fm10k_mac_ops *mac_ops; - struct fm10k_iov_ops *iov_ops; + enum fm10k_mac_type mac; + s32 (*get_invariants)(struct fm10k_hw *); + const struct fm10k_mac_ops *mac_ops; + const struct fm10k_iov_ops *iov_ops; }; struct fm10k_hw { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 36c8b0aa08fd..91f8d7311f3b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -28,7 +28,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) { u8 *perm_addr = hw->mac.perm_addr; - u32 bal = 0, bah = 0; + u32 bal = 0, bah = 0, tdlen; s32 err; u16 i; @@ -48,6 +48,9 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) ((u32)perm_addr[2]); } + /* restore default itr_scale for next VF initialization */ + tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT; + /* The queues have already been disabled so we just need to * update their base address registers */ @@ -56,6 +59,12 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) fm10k_write_reg(hw, FM10K_TDBAH(i), bah); fm10k_write_reg(hw, FM10K_RDBAL(i), bal); fm10k_write_reg(hw, FM10K_RDBAH(i), bah); + /* Restore ITR scale in software-defined mechanism in TDLEN + * for next VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of + * TDLEN here. 
+ */ + fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen); } return 0; @@ -103,7 +112,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) s32 err; u16 i; - /* assume we always have at least 1 queue */ + /* verify we have at least 1 queue */ + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) || + !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) { + err = FM10K_ERR_NO_RESOURCES; + goto reset_max_queues; + } + + /* determine how many queues we have */ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { /* verify the Descriptor cache offsets are increasing */ tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); @@ -119,16 +135,28 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) /* shut down queues we own and reset DMA configuration */ err = fm10k_disable_queues_generic(hw, i); if (err) - return err; + goto reset_max_queues; /* record maximum queue count */ hw->mac.max_queues = i; - /* fetch default VLAN */ + /* fetch default VLAN and ITR scale */ hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + /* Read the ITR scale from TDLEN. See the definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is + * used here. + */ + hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) & + FM10K_TDLEN_ITR_SCALE_MASK) >> + FM10K_TDLEN_ITR_SCALE_SHIFT; return 0; + +reset_max_queues: + hw->mac.max_queues = 0; + + return err; } /* This structure defines the attibutes to be parsed below */ @@ -270,7 +298,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, /* verify we are not locked down on the MAC address */ if (is_valid_ether_addr(hw->mac.perm_addr) && - memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + !ether_addr_equal(hw->mac.perm_addr, mac)) return FM10K_ERR_PARAM; /* add bit to notify us if this is a set or clear operation */ @@ -414,6 +442,7 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; + /* generate message requesting to change xcast mode */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); @@ -533,25 +562,25 @@ static const struct fm10k_msg_data fm10k_msg_data_vf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -static struct fm10k_mac_ops mac_ops_vf = { - .get_bus_info = &fm10k_get_bus_info_generic, - .reset_hw = &fm10k_reset_hw_vf, - .init_hw = &fm10k_init_hw_vf, - .start_hw = &fm10k_start_hw_generic, - .stop_hw = &fm10k_stop_hw_vf, - .update_vlan = &fm10k_update_vlan_vf, - .read_mac_addr = &fm10k_read_mac_addr_vf, - .update_uc_addr = &fm10k_update_uc_addr_vf, - .update_mc_addr = &fm10k_update_mc_addr_vf, - .update_xcast_mode = &fm10k_update_xcast_mode_vf, - .update_int_moderator = &fm10k_update_int_moderator_vf, - .update_lport_state = &fm10k_update_lport_state_vf, - .update_hw_stats = &fm10k_update_hw_stats_vf, - .rebind_hw_stats = &fm10k_rebind_hw_stats_vf, - .configure_dglort_map = &fm10k_configure_dglort_map_vf, - .get_host_state = &fm10k_get_host_state_generic, - .adjust_systime = &fm10k_adjust_systime_vf, - .read_systime = &fm10k_read_systime_vf, +static const struct fm10k_mac_ops mac_ops_vf = { + .get_bus_info = fm10k_get_bus_info_generic, + .reset_hw = fm10k_reset_hw_vf, + .init_hw = fm10k_init_hw_vf, + .start_hw = fm10k_start_hw_generic, + .stop_hw = fm10k_stop_hw_vf, + .update_vlan = fm10k_update_vlan_vf, + .read_mac_addr = fm10k_read_mac_addr_vf, + .update_uc_addr = fm10k_update_uc_addr_vf, + .update_mc_addr = 
fm10k_update_mc_addr_vf, + .update_xcast_mode = fm10k_update_xcast_mode_vf, + .update_int_moderator = fm10k_update_int_moderator_vf, + .update_lport_state = fm10k_update_lport_state_vf, + .update_hw_stats = fm10k_update_hw_stats_vf, + .rebind_hw_stats = fm10k_rebind_hw_stats_vf, + .configure_dglort_map = fm10k_configure_dglort_map_vf, + .get_host_state = fm10k_get_host_state_generic, + .adjust_systime = fm10k_adjust_systime_vf, + .read_systime = fm10k_read_systime_vf, }; static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) @@ -561,8 +590,8 @@ static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); } -struct fm10k_info fm10k_vf_info = { +const struct fm10k_info fm10k_vf_info = { .mac = fm10k_mac_vf, - .get_invariants = &fm10k_get_invariants_vf, + .get_invariants = fm10k_get_invariants_vf, .mac_ops = &mac_ops_vf, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 06a99d794c99..c4439f1313a0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -74,5 +74,5 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; #define FM10K_VF_MSG_1588_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) -extern struct fm10k_info fm10k_vf_info; +extern const struct fm10k_info fm10k_vf_info; #endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4dd3e26129b4..68f2204ec6f3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -42,7 +42,6 @@ #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> -#include <linux/tcp.h> #include <linux/sctp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> @@ -104,6 +103,7 @@ #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) +#define I40E_PRIV_FLAGS_PS BIT(4) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -187,6 +187,7 @@ struct i40e_lump_tracking { #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -244,6 +245,11 @@ struct i40e_tc_configuration { struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; }; +struct i40e_udp_port_config { + __be16 index; + u8 type; +}; + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -265,7 +271,7 @@ struct i40e_pf { u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ - u16 rss_size; /* num queues in the RSS array */ + u16 alloc_rss_size; /* allocated RSS queues */ u16 rss_size_max; /* HW defined max RSS queues */ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ u16 num_alloc_vsi; /* num VSIs this driver supports */ @@ -280,11 +286,9 @@ struct i40e_pf { u32 fd_atr_cnt; u32 fd_tcp_rule; -#ifdef CONFIG_I40E_VXLAN - __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; - u16 pending_vxlan_bitmap; + struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_udp_bitmap; -#endif enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; @@ -321,9 +325,7 @@ struct 
i40e_pf { #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) -#ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) -#endif +#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) #define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) #define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) @@ -335,7 +337,9 @@ struct i40e_pf { #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -412,7 +416,7 @@ struct i40e_pf { u32 rx_hwtstamp_cleared; bool ptp_tx; bool ptp_rx; - u16 rss_table_size; + u16 rss_table_size; /* HW RSS table size */ /* These are only valid in NPAR modes */ u32 npar_max_bw; u32 npar_min_bw; @@ -487,6 +491,7 @@ struct i40e_vsi { u32 tx_restart; u32 tx_busy; u64 tx_linearize; + u64 tx_force_wb; u32 rx_buf_failed; u32 rx_page_failed; @@ -504,8 +509,10 @@ struct i40e_vsi { u16 tx_itr_setting; u16 int_rate_limit; /* value in usecs */ - u16 rss_table_size; - u16 rss_size; + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; u16 rx_hdr_len; @@ -575,6 +582,9 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ +#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */ + unsigned long hung_detected; /* Set/Reset for hung_detection logic */ + cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; @@ -602,8 +612,8 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw) full_ver = hw->nvm.oem_ver; ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); - build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) - & I40E_OEM_VER_BUILD_MASK); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) & + I40E_OEM_VER_BUILD_MASK); patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), @@ -668,6 +678,8 @@ extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); @@ -691,7 +703,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev); -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); @@ -709,7 +721,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, void i40e_veb_release(struct i40e_veb *veb); int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); -i40e_status 
i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); +int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); void i40e_vsi_reset_stats(struct i40e_vsi *vsi); void i40e_pf_reset_stats(struct i40e_pf *pf); @@ -767,6 +779,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6584b6cd73fd..b22012a446a6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1891,6 +1892,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ +struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2403,4 +2424,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d74c6e4d7b6..6a034ddac36a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -44,7 +44,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d4b7af9a2fc8..10744a698d6f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -103,8 +103,8 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos += len; return len; @@ -353,8 +353,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; 
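The i40e_debugfs.c conversions around this point all follow from the same API fact: copy_to_user() and copy_from_user() return the number of bytes they could not copy (zero on success) and never a negative errno, so the old "< 0" error checks could never fire. The idiomatic pattern, as a standalone sketch:

    /* Any non-zero return means the user buffer faulted part-way
     * through; report -EFAULT rather than passing the leftover byte
     * count back to the caller.
     */
    if (copy_to_user(buffer, buf, len))
            return -EFAULT;

    if (copy_from_user(cmd_buf, buffer, count))
            return -EFAULT;
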
*ppos = len; return len; @@ -981,12 +981,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) { + if (bytes_not_copied) { kfree(cmd_buf); - return bytes_not_copied; + return -EFAULT; } - if (bytes_not_copied > 0) - count -= bytes_not_copied; cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); @@ -1140,7 +1138,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", @@ -1179,7 +1177,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); - ret = i40e_sync_vsi_filters(vsi, true); + ret = i40e_sync_vsi_filters(vsi); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", @@ -2034,8 +2032,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); - if (bytes_not_copied < 0) - return bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; *ppos = len; return len; @@ -2068,10 +2066,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied > 0) - count -= bytes_not_copied; + if (bytes_not_copied) + return -EFAULT; i40e_dbg_netdev_ops_buf[count] = '\0'; buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index c601ca4a610c..448ef4c17efb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 3f385ffe420f..29d5833e24a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -88,6 +88,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), + I40E_VSI_STAT("tx_force_wb", tx_force_wb), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, @@ -230,6 +231,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "LinkPolling", "flow-director-atr", "veb-stats", + "packet-split", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -2110,7 +2112,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = vsi->alloc_queue_pairs; + cmd->data = vsi->num_queue_pairs; ret = 0; break; case ETHTOOL_GRXFH: @@ -2583,7 +2585,6 @@ static 
int i40e_set_channels(struct net_device *dev, return -EINVAL; } -#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) /** * i40e_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -2611,10 +2612,9 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *lut, *seed = NULL; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -2622,24 +2622,20 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HLUT(i)); - indir[j++] = reg_val & 0xff; - indir[j++] = (reg_val >> 8) & 0xff; - indir[j++] = (reg_val >> 16) & 0xff; - indir[j++] = (reg_val >> 24) & 0xff; - } + seed = key; + lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE); + if (ret) + goto out; + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)(lut[i]); - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = rd32(hw, I40E_PFQF_HKEY(i)); - key[j++] = (u8)(reg_val & 0xff); - key[j++] = (u8)((reg_val >> 8) & 0xff); - key[j++] = (u8)((reg_val >> 16) & 0xff); - key[j++] = (u8)((reg_val >> 24) & 0xff); - } - } - return 0; +out: + kfree(lut); + + return ret; } /** @@ -2656,10 +2652,8 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 reg_val; - int i, j; + u8 *seed = NULL; + u16 i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; @@ -2667,24 +2661,28 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - reg_val = indir[j++]; - reg_val |= indir[j++] << 8; - reg_val |= indir[j++] << 16; - reg_val |= indir[j++] << 24; - wr32(hw, I40E_PFQF_HLUT(i), reg_val); - } - if (key) { - for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { - reg_val = key[j++]; - reg_val |= key[j++] << 8; - reg_val |= key[j++] << 16; - reg_val |= key[j++] << 24; - wr32(hw, I40E_PFQF_HKEY(i), reg_val); + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; } + memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; } - return 0; + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; + } + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40e_config_rss(vsi, seed, vsi->rss_lut_user, + I40E_HLUT_ARRAY_SIZE); } /** @@ -2712,6 +2710,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_FD_ATR : 0; ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? I40E_PRIV_FLAGS_VEB_STATS : 0; + ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
+ I40E_PRIV_FLAGS_PS : 0; return ret_flags; } @@ -2726,6 +2726,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + bool reset_required = false; + + /* NOTE: MFP is not settable */ + + /* allow the user to control the method of receive + * buffer DMA, whether the packet is split at header + * boundaries into two separate buffers. In some cases + * one routine or the other will perform better. + */ + if ((flags & I40E_PRIV_FLAGS_PS) && + !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_PS) && + (pf->flags & I40E_FLAG_RX_PS_ENABLED)) { + pf->flags &= ~I40E_FLAG_RX_PS_ENABLED; + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; + reset_required = true; + } if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; @@ -2748,6 +2768,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) else pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index fe5d9bf3ed6d..579a46ca82df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1544,8 +1544,6 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) return; - BUG_ON(!pf->vsi[pf->lan_vsi]); - for (i = 0; i < pf->num_alloc_vsi; i++) { vsi = pf->vsi[i]; if (vsi && vsi->type == I40E_VSI_FCOE) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 79ae7beeafe5..daa9204426d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -762,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = BIT(ce_info->width) - 1; + mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4a9873ec28c7..bb4612c159fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -24,12 +24,24 @@ * ******************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/pci.h> + +#ifdef CONFIG_SPARC +#include <asm/idprom.h> +#include <asm/prom.h> +#endif + /* Local includes */ #include "i40e.h" #include "i40e_diag.h" -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) #include <net/vxlan.h> #endif +#if IS_ENABLED(CONFIG_GENEVE) +#include <net/geneve.h> +#endif const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -38,8 +50,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 46 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 8 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -55,6 +67,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); @@ -68,7 +82,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb); static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, @@ -790,75 +803,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) #endif /** - * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in link flow control mode - **/ -static void i40e_update_link_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - struct i40e_hw *hw = &pf->hw; - u64 xoff = 0; - - if ((hw->fc.current_mode != I40E_FC_FULL) && - (hw->fc.current_mode != I40E_FC_RX_PAUSE)) - return; - - xoff = nsd->link_xoff_rx; - i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), - pf->stat_offsets_loaded, - &osd->link_xoff_rx, &nsd->link_xoff_rx); - - /* No new LFC xoff rx */ - if (!(nsd->link_xoff_rx - xoff)) - return; - -} - -/** - * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode - * @pf: the corresponding PF - * - * Update the Rx XOFF counter (PAUSE frames) in PFC mode - **/ -static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) -{ - struct i40e_hw_port_stats *osd = &pf->stats_offsets; - struct i40e_hw_port_stats *nsd = &pf->stats; - bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; - struct i40e_dcbx_config *dcb_cfg; - struct i40e_hw *hw = &pf->hw; - u16 i; - u8 tc; - - dcb_cfg = &hw->local_dcbx_config; - - /* Collect Link XOFF stats when PFC is disabled */ - if (!dcb_cfg->pfc.pfcenable) { - i40e_update_link_xoff_rx(pf); - return; - } - - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - u64 prio_xoff = nsd->priority_xoff_rx[i]; - - i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), - pf->stat_offsets_loaded, - &osd->priority_xoff_rx[i], - &nsd->priority_xoff_rx[i]); - - /* No new PFC xoff rx */ - if (!(nsd->priority_xoff_rx[i] - prio_xoff)) - continue; - /* Get the TC for given priority */ - tc = dcb_cfg->etscfg.prioritytable[i]; - xoff[tc] = true; - } -} - -/** * i40e_update_vsi_stats - Update the vsi statistics counters. 
* @vsi: the VSI to be updated * @@ -881,6 +825,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u64 bytes, packets; unsigned int start; u64 tx_linearize; + u64 tx_force_wb; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -899,7 +844,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) */ rx_b = rx_p = 0; tx_b = tx_p = 0; - tx_restart = tx_busy = tx_linearize = 0; + tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -917,6 +862,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; + tx_force_wb += p->tx_stats.tx_force_wb; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -934,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; + vsi->tx_force_wb = tx_force_wb; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1049,12 +996,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); - i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ + i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xoff_rx[i], + &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], @@ -1317,6 +1270,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, } /** + * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * @vsi: the VSI to be searched + * @macaddr: the mac address to be removed + * @is_vf: true if it is a VF + * @is_netdev: true if it is a netdev + * + * Removes a given MAC address from a VSI, regardless of VLAN + * + * Returns 0 for success, or error + **/ +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f = NULL; + int changed = 0; + + WARN(!spin_is_locked(&vsi->mac_filter_list_lock), + "Missing mac_filter_list_lock\n"); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (is_vf == f->is_vf) && + (is_netdev == f->is_netdev)) { + f->counter--; + f->changed = true; + changed = 1; + } + } + if (changed) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + return 0; + } + return -ENOENT; +} + +/** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address @@ -1547,10 +1536,9 @@ static int i40e_set_mac(struct net_device *netdev, void *p) spin_unlock_bh(&vsi->mac_filter_list_lock); } - i40e_sync_vsi_filters(vsi, false); ether_addr_copy(netdev->dev_addr, addr->sa_data); - return 0; + return i40e_sync_vsi_filters(vsi); } /** @@ -1590,7 +1578,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & 
BIT_ULL(i)) /* TC is enabled */ + if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1619,13 +1607,14 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: - qcount = min_t(int, pf->rss_size, num_tc_qps); + qcount = min_t(int, pf->alloc_rss_size, + num_tc_qps); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: @@ -1851,13 +1840,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list) /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI - * @grab_rtnl: whether RTNL needs to be grabbed * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ -int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) +int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; @@ -1865,8 +1853,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + i40e_status aq_ret = 0; bool err_cond = false; - i40e_status ret = 0; + int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; @@ -1929,17 +1918,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } spin_unlock_bh(&vsi->mac_filter_list_lock); - if (err_cond) + if (err_cond) { i40e_cleanup_add_list(&tmp_add_list); + retval = -ENOMEM; + goto out; + } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { + int del_list_size; + filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); + del_list_size = filter_list_len * + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kzalloc(del_list_size, GFP_KERNEL); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -1948,7 +1942,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { @@ -1966,18 +1961,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_del == filter_list_len) { - ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, + del_list, + num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - memset(del_list, 0, sizeof(*del_list)); + memset(del_list, 0, del_list_size); - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { + retval = -EIO; dev_err(&pf->pdev->dev, "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); + } } /* Release memory for MAC filter entries which were * synced up with HW. 
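/*
 * Illustrative sketch, not part of the patch above: the del_list handling in
 * i40e_sync_vsi_filters() batches MAC/VLAN removals into a buffer sized to
 * the admin-queue message limit, flushes it whenever it fills, and flushes
 * whatever remains once the list is exhausted.  The stand-alone example
 * below shows only that batching pattern, using hypothetical names
 * (flush_batch, MAX_PER_MSG); it is not driver code.
 */
#include <stdio.h>

#define MAX_PER_MSG 4			/* stands in for filter_list_len */

static void flush_batch(const int *batch, int count)
{
	/* stands in for i40e_aq_remove_macvlan(): one firmware message */
	printf("flushing %d entries starting with %d\n", count, batch[0]);
}

static void remove_all(const int *entries, int total)
{
	int batch[MAX_PER_MSG];
	int num = 0;

	for (int i = 0; i < total; i++) {
		batch[num++] = entries[i];
		if (num == MAX_PER_MSG) {	/* flush a full buffer */
			flush_batch(batch, num);
			num = 0;
		}
	}
	if (num)				/* flush the partial tail */
		flush_batch(batch, num);
}

int main(void)
{
	int filters[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };

	remove_all(filters, 10);
	return 0;
}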
@@ -1987,15 +1986,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_del) { - ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, NULL); + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + del_list, num_del, + NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (ret && aq_err != I40E_AQ_RC_ENOENT) + if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); } @@ -2004,13 +2004,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (!list_empty(&tmp_add_list)) { + int add_list_size; /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), - add_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_add_macvlan_element_data), - GFP_KERNEL); + add_list_size = filter_list_len * + sizeof(struct i40e_aqc_add_macvlan_element_data); + add_list = kzalloc(add_list_size, GFP_KERNEL); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); @@ -2019,7 +2020,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); - return -ENOMEM; + retval = -ENOMEM; + goto out; } list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { @@ -2040,15 +2042,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) /* flush a full buffer */ if (num_add == filter_list_len) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (ret) + if (aq_ret) break; - memset(add_list, 0, sizeof(*add_list)); + memset(add_list, 0, add_list_size); } /* Entries from tmp_add_list were cloned from MAC * filter list, hence clean those cloned entries @@ -2058,18 +2060,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) } if (num_add) { - ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { + if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { + retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, @@ -2087,16 +2090,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_multipromisc, - NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } if ((changed_flags & IFF_PROMISC) 
|| promisc_forced_on) { bool cur_promisc; @@ -2112,44 +2118,50 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - if (grab_rtnl) - i40e_do_reset_safe(pf, - BIT(__I40E_PF_RESET_REQUESTED)); - else - i40e_do_reset(pf, - BIT(__I40E_PF_RESET_REQUESTED)); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } } else { - ret = i40e_aq_set_vsi_unicast_promiscuous( + aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set unicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); - ret = i40e_aq_set_vsi_multicast_promiscuous( + aq_ret, pf->hw.aq.asq_last_status); + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); - if (ret) + if (aq_ret) { + retval = + i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multicast promisc failed, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + aq_ret, pf->hw.aq.asq_last_status); + } } - ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (ret) + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + } } - +out: clear_bit(__I40E_CONFIG_BUSY, &vsi->state); - return 0; + return retval; } /** @@ -2166,8 +2178,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) - i40e_sync_vsi_filters(pf->vsi[v], true); + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + int ret = i40e_sync_vsi_filters(pf->vsi[v]); + + if (ret) { + /* come back and try again later */ + pf->flags |= I40E_FLAG_FILTER_SYNC; + break; + } + } } } @@ -2377,16 +2396,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function will lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2459,16 +2475,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) } } - /* Make sure to release before sync_vsi_filter because that - * function with lock/unlock as necessary - */ spin_unlock_bh(&vsi->mac_filter_list_lock); - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - return 0; - - return i40e_sync_vsi_filters(vsi, false); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -2711,6 +2724,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) netif_set_xps_queue(ring->netdev, mask, ring->queue_index); free_cpumask_var(mask); } + + /* schedule our worker thread which will take care of + * 
applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** @@ -4360,17 +4378,41 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); + /* Bail out if interrupts are disabled because napi_poll + * execution in-progress or will get scheduled soon. + * napi_poll cleans TX and RX queues and updates 'next_to_clean'. + */ + if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) + return; + head = i40e_get_head(tx_ring); tx_pending = i40e_get_tx_pending(tx_ring); - /* Interrupts are disabled and TX pending is non-zero, - * trigger the SW interrupt (don't wait). Worst case - * there will be one extra interrupt which may result - * into not cleaning any queues because queues are cleaned. + /* HW is done executing descriptors, updated HEAD write back, + * but SW hasn't processed those descriptors. If interrupt is + * not generated from this point ON, it could result into + * dev_watchdog detecting timeout on those netdev_queue, + * hence proactively trigger SW interrupt. */ - if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) - i40e_force_wb(vsi, tx_ring->q_vector); + if (tx_pending) { + /* NAPI Poll didn't run and clear since it was set */ + if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected)) { + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending, + tx_ring->next_to_clean, head, + tx_ring->next_to_use, + readl(tx_ring->tail)); + netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", + vsi->seid, q_idx, val); + i40e_force_wb(vsi, tx_ring->q_vector); + } else { + /* First Chance - detected possible hung */ + set_bit(I40E_Q_VECTOR_HUNG_DETECT, + &tx_ring->q_vector->hung_detected); + } + } } /** @@ -4441,7 +4483,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= BIT_ULL(tc); + enabled_tc |= BIT(tc); break; } } @@ -4525,7 +4567,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) num_tc++; } return num_tc; @@ -4547,7 +4589,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) break; } @@ -4707,7 +4749,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. 
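/*
 * Illustrative sketch, not part of the patch above: the logic added to
 * i40e_detect_recover_hung_queue() gives a stalled Tx queue "two chances".
 * The first watchdog pass only marks the queue as suspect; only if the mark
 * is still set on the next pass (meaning NAPI never ran and cleared it) is a
 * software interrupt forced.  Hypothetical names (struct queue_state,
 * force_sw_irq) are used below; this is not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_state {
	bool hung_suspected;	/* stands in for I40E_Q_VECTOR_HUNG_DETECT */
	int  pending;		/* descriptors HW completed but SW never reaped */
};

static void force_sw_irq(int q)
{
	printf("queue %d: forcing software interrupt\n", q);
}

/* called from the periodic watchdog for each queue */
static void detect_recover(struct queue_state *qs, int q)
{
	if (!qs->pending) {
		qs->hung_suspected = false;	/* queue is healthy */
		return;
	}
	if (qs->hung_suspected) {
		force_sw_irq(q);		/* second strike: recover */
		qs->hung_suspected = false;
	} else {
		qs->hung_suspected = true;	/* first strike: just mark it */
	}
}

/* NAPI poll clears the mark whenever it actually runs */
static void napi_ran(struct queue_state *qs)
{
	qs->hung_suspected = false;
	qs->pending = 0;
}

int main(void)
{
	struct queue_state qs = { .pending = 3 };

	detect_recover(&qs, 0);		/* first pass: mark as suspect */
	detect_recover(&qs, 0);		/* still pending: force interrupt */
	napi_ran(&qs);
	return 0;
}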
*/ - if (vsi->tc_config.enabled_tc & BIT_ULL(i)) + if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4769,7 +4811,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_share[i] = 1; } @@ -4843,7 +4885,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT_ULL(i)) + if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -5240,7 +5282,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= BIT_ULL(i); + enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) @@ -5305,6 +5347,9 @@ int i40e_open(struct net_device *netdev) #ifdef CONFIG_I40E_VXLAN vxlan_get_rx_port(netdev); #endif +#ifdef CONFIG_I40E_GENEVE + geneve_get_rx_port(netdev); +#endif return 0; } @@ -5738,7 +5783,7 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, **/ static void i40e_service_event_complete(struct i40e_pf *pf) { - BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); + WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); @@ -6013,6 +6058,9 @@ static void i40e_link_event(struct i40e_pf *pf) i40e_status status; bool new_link, old_link; + /* save off old link status information */ + pf->hw.phy.link_info_old = pf->hw.phy.link_info; + /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; @@ -6101,23 +6149,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); + reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); + reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); + reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -6147,13 +6195,9 @@ unlock: static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_hw *hw = &pf->hw; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; - /* save off old link status information */ - hw->phy.link_info_old = hw->phy.link_info; - /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information @@ 
-6192,15 +6236,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { - dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); + if (hw->debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) @@ -6209,15 +6256,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { - dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { - dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { - dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); + if (pf->hw.debug_mask & I40E_DEBUG_AQ) + dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) @@ -6268,6 +6318,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: + case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); break; default: @@ -6685,6 +6736,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; + u32 val; u32 v; /* Now we wait for GRST to settle out. @@ -6823,6 +6875,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. 
+ */ +#define I40E_REG_MSS 0x000E64DC +#define I40E_REG_MSS_MIN_MASK 0x3FF0000 +#define I40E_64BYTE_MSS 0x400000 + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); @@ -6984,30 +7050,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } -#ifdef CONFIG_I40E_VXLAN /** - * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ -static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; int i; - if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) return; - pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { - pf->pending_vxlan_bitmap &= ~BIT_ULL(i); - port = pf->vxlan_ports[i]; + if (pf->pending_udp_bitmap & BIT_ULL(i)) { + pf->pending_udp_bitmap &= ~BIT_ULL(i); + port = pf->udp_ports[i].index; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), - I40E_AQC_TUNNEL_TYPE_VXLAN, + pf->udp_ports[i].type, NULL, NULL); else ret = i40e_aq_del_udp_tunnel(hw, i, NULL); @@ -7020,13 +7086,13 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->vxlan_ports[i] = 0; + pf->udp_ports[i].index = 0; } } } +#endif } -#endif /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data @@ -7051,8 +7117,8 @@ static void i40e_service_task(struct work_struct *work) i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#ifdef CONFIG_I40E_VXLAN - i40e_sync_vxlan_filters_subtask(pf); +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) + i40e_sync_udp_filters_subtask(pf); #endif i40e_clean_adminq_subtask(pf); @@ -7282,6 +7348,23 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) } /** + * i40e_clear_rss_config_user - clear the user configured RSS hash keys + * and lookup table + * @vsi: Pointer to VSI structure + */ +static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + +/** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ @@ -7318,6 +7401,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_vsi_free_arrays(vsi, true); + i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) @@ -7780,7 +7864,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) * @vsi: vsi structure * @seed: RSS hash seed **/ -static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; @@ -7833,43 +7918,57 @@ static int 
i40e_vsi_config_rss(struct i40e_vsi *vsi) { u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_pf *pf = vsi->back; + u8 *lut; + int ret; - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + return 0; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(vsi, seed); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; - return 0; + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** - * i40e_config_rss_reg - Prepare for RSS if used - * @pf: board private structure + * i40e_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure **/ -static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) +static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 *seed_dw = (u32 *)seed; - u32 current_queue = 0; - u32 lut = 0; - int i, j; + u8 i; /* Fill out hash function seed */ - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + if (seed) { + u32 *seed_dw = (u32 *)seed; - for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (current_queue == vsi->rss_size) - current_queue = 0; - lut |= ((current_queue) << (8 * j)); - current_queue++; - } - wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } i40e_flush(hw); @@ -7877,18 +7976,101 @@ static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) } /** - * i40e_config_rss - Prepare for RSS if used + * i40e_get_rss_reg - Get the RSS keys and lut by reading registers + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); + } + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40e_config_rss - Configure RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + */ +int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + + if 
(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_get_rss - Get RSS keys and lut + * @vsi: Pointer to VSI structure + * @seed: Buffer to store the keys + * @lut: Buffer to store the lookup table entries + * lut_size: Size of buffer to store the lookup table entries + * + * Returns 0 on success, negative on failure + */ +int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) +{ + return i40e_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40e_fill_rss_lut - Fill the RSS lookup table with default values + * @pf: Pointer to board private structure + * @lut: Lookup table + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + */ +static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, + u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +} + +/** + * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ -static int i40e_config_rss(struct i40e_pf *pf) +static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; + u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; - - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7898,8 +8080,6 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); - vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); - /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? @@ -7907,10 +8087,32 @@ static int i40e_config_rss(struct i40e_pf *pf) (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) - return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); + /* Determine the RSS size of the VSI */ + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + + /* Use user configured hash key if there is one, otherwise + * use default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else - return i40e_config_rss_reg(pf, seed); + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; } /** @@ -7935,13 +8137,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = new_rss_size; + pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); - i40e_config_rss(pf); + + /* Discard the user configured hash keys and lut, if less + * queues are enabled. 
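/*
 * Illustrative sketch, not part of the patch above: i40e_fill_rss_lut()
 * spreads the RSS lookup-table entries over the enabled queues with a
 * simple modulo, so a 128- or 512-entry table maps round-robin onto
 * rss_size queues.  Stand-alone example with hypothetical sizes; not
 * driver code.
 */
#include <stdint.h>
#include <stdio.h>

static void fill_rss_lut(uint8_t *lut, uint16_t table_size, uint16_t rss_size)
{
	for (uint16_t i = 0; i < table_size; i++)
		lut[i] = i % rss_size;	/* queue index for hash bucket i */
}

int main(void)
{
	uint8_t lut[128];

	fill_rss_lut(lut, 128, 6);	/* e.g. 6 online CPUs -> 6 queues */
	printf("bucket 0 -> q%d, bucket 7 -> q%d\n", lut[0], lut[7]);
	return 0;
}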
+ */ + if (queue_count < vsi->rss_size) { + i40e_clear_rss_config_user(vsi); + dev_dbg(&pf->pdev->dev, + "discard user configured hash keys and lut\n"); + } + + /* Reset vsi->rss_size, as number of enabled queues changed */ + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + + i40e_pf_config_rss(pf); } - dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); - return pf->rss_size; + dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", + pf->alloc_rss_size, pf->rss_size_max); + return pf->alloc_rss_size; } /** @@ -8112,13 +8329,14 @@ static int i40e_sw_init(struct i40e_pf *pf) * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); - pf->rss_size = 1; + pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; - pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + pf->alloc_rss_size = min_t(int, pf->rss_size_max, + num_online_cpus()); } /* MFP mode enabled */ @@ -8176,7 +8394,8 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_HW_ATR_EVICT_CAPABLE | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | - I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; + I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; @@ -8275,26 +8494,27 @@ static int i40e_set_features(struct net_device *netdev, return 0; } -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** - * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure * @port: The UDP port to look up * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ -static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->vxlan_ports[i] == port) + if (pf->udp_ports[i].index == port) return i; } return i; } +#endif /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8304,6 +8524,7 @@ static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { +#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8313,7 +8534,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { @@ -8323,7 +8544,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* Now check if there is space to add the new port */ - next_idx = i40e_get_vxlan_port_idx(pf, 0); + next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", @@ -8332,9 +8553,11 @@ static void i40e_add_vxlan_port(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->vxlan_ports[next_idx] = port; - 
pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +#endif } /** @@ -8346,6 +8569,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { +#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8354,23 +8578,108 @@ static void i40e_del_vxlan_port(struct net_device *netdev, if (sa_family == AF_INET6) return; - idx = i40e_get_vxlan_port_idx(pf, port); + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } +#endif +} + +/** + * i40e_add_geneve_port - Get notifications about GENEVE ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: New UDP port number that GENEVE started listening to + **/ +static void i40e_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ +#if IS_ENABLED(CONFIG_GENEVE) + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "udp port %d already offloaded\n", + ntohs(port)); + return; + } + + /* Now check if there is space to add the new port */ + next_idx = i40e_get_udp_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", + ntohs(port)); + return; + } + + /* New port: add it and mark its index in the bitmap */ + pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + pf->pending_udp_bitmap |= BIT_ULL(next_idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); +#endif } +/** + * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that GENEVE is notifying us about + * @port: UDP port number that GENEVE stopped listening to + **/ +static void i40e_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ +#if IS_ENABLED(CONFIG_GENEVE) + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_udp_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= 
I40E_FLAG_UDP_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting geneve port %d\n", + ntohs(port)); + } else { + netdev_warn(netdev, "geneve port %d was not found, not deleting\n", + ntohs(port)); + } #endif +} + static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { @@ -8548,7 +8857,10 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, nlflags, 0, 0, filter_mask, NULL); } -#define I40E_MAX_TUNNEL_HDR_LEN 80 +/* Hardware supports L4 tunnel length of 128B (=2^7) which includes + * inner mac plus all inner ethertypes. + */ +#define I40E_MAX_TUNNEL_HDR_LEN 128 /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff @@ -8560,9 +8872,9 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb->encapsulation && - (skb_inner_mac_header(skb) - skb_transport_header(skb) > + ((skb_inner_network_header(skb) - skb_transport_header(skb)) > I40E_MAX_TUNNEL_HDR_LEN)) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } @@ -8595,10 +8907,14 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, -#ifdef CONFIG_I40E_VXLAN +#if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = i40e_add_geneve_port, + .ndo_del_geneve_port = i40e_del_geneve_port, +#endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@ -8632,13 +8948,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -9051,7 +9368,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) f->is_vf, f->is_netdev); spin_unlock_bh(&vsi->mac_filter_list_lock); - i40e_sync_vsi_filters(vsi, false); + i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); @@ -9213,6 +9530,44 @@ err_vsi: } /** + * i40e_macaddr_init - explicitly write the mac address filters. + * + * @vsi: pointer to the vsi. + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. 
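/*
 * Illustrative sketch, not part of the patch above: the VXLAN/GENEVE ndo
 * callbacks shown earlier only record a port (and its tunnel type) in a
 * small fixed table and set a pending bit; the service task later walks
 * the bitmap and pushes adds (non-zero port) or deletes (port cleared to
 * 0) to firmware.  Hypothetical names (struct udp_table, fw_sync) are
 * used; this is not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_UDP_PORTS 16	/* stands in for I40E_MAX_PF_UDP_OFFLOAD_PORTS */

struct udp_table {
	uint16_t port[MAX_UDP_PORTS];	/* 0 means free or marked for delete */
	uint32_t pending;		/* one bit per slot needing a fw update */
};

static void add_port(struct udp_table *t, uint16_t port)
{
	for (int i = 0; i < MAX_UDP_PORTS; i++) {
		if (!t->port[i]) {
			t->port[i] = port;
			t->pending |= 1u << i;
			return;
		}
	}
}

static void del_port(struct udp_table *t, uint16_t port)
{
	for (int i = 0; i < MAX_UDP_PORTS; i++) {
		if (t->port[i] == port) {
			t->port[i] = 0;		/* mark slot for deletion */
			t->pending |= 1u << i;
			return;
		}
	}
}

/* what the periodic service task does with the pending bits */
static void fw_sync(struct udp_table *t)
{
	for (int i = 0; i < MAX_UDP_PORTS; i++) {
		if (!(t->pending & (1u << i)))
			continue;
		t->pending &= ~(1u << i);
		if (t->port[i])
			printf("add port %d to firmware\n", t->port[i]);
		else
			printf("del slot %d from firmware\n", i);
	}
}

int main(void)
{
	struct udp_table t = { { 0 }, 0 };

	add_port(&t, 4789);	/* IANA VXLAN port */
	add_port(&t, 6081);	/* IANA GENEVE port */
	del_port(&t, 4789);
	fw_sync(&t);
	return 0;
}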
+ * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + +/** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type @@ -9336,6 +9691,17 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: + /* Apply relevant filters if a platform-specific mac + * address was selected. + */ + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (ret) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", + ret); + } + } case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); @@ -9947,7 +10313,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) - i40e_config_rss(pf); + i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_update_link_info(&pf->hw); @@ -9985,7 +10351,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10002,7 +10368,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ - pf->rss_size = pf->num_lan_qps = 1; + pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | @@ -10072,8 +10438,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), - pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, - pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); + pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, + pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, + queues_left); #ifdef I40E_FCOE dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif @@ -10111,55 +10478,86 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) } #define INFO_STRING_LEN 255 +#define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - char *buf, *string; + char *buf; + int i; - string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); - if (!string) { - dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!buf) return; - } - buf = string; - - buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); + i = 
snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV - buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); + i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs, - pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); + i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) - buf += sprintf(buf, "RSS "); + i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) - buf += sprintf(buf, "FD_ATR "); + i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { - buf += sprintf(buf, "FD_SB "); - buf += sprintf(buf, "NTUPLE "); + i += snprintf(&buf[i], REMAIN(i), " FD_SB"); + i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) - buf += sprintf(buf, "DCB "); + i += snprintf(&buf[i], REMAIN(i), " DCB"); #if IS_ENABLED(CONFIG_VXLAN) - buf += sprintf(buf, "VxLAN "); + i += snprintf(&buf[i], REMAIN(i), " VxLAN"); +#endif +#if IS_ENABLED(CONFIG_GENEVE) + i += snprintf(&buf[i], REMAIN(i), " Geneve"); #endif if (pf->flags & I40E_FLAG_PTP) - buf += sprintf(buf, "PTP "); + i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) - buf += sprintf(buf, "FCOE "); + i += snprintf(&buf[i], REMAIN(i), " FCOE"); #endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) - buf += sprintf(buf, "VEB "); + i += snprintf(&buf[i], REMAIN(i), " VEB"); else - buf += sprintf(buf, "VEPA "); + i += snprintf(&buf[i], REMAIN(i), " VEPA"); - BUG_ON(buf > (string + INFO_STRING_LEN)); - dev_info(&pf->pdev->dev, "%s\n", string); - kfree(string); + dev_info(&pf->pdev->dev, "%s\n", buf); + kfree(buf); + WARN_ON(i > INFO_STRING_LEN); +} + +/** + * i40e_get_platform_mac_addr - get platform-specific MAC address + * + * @pdev: PCI device information struct + * @pf: board private structure + * + * Look up the MAC address in Open Firmware on systems that support it, + * and use IDPROM on SPARC if no OF address is found. On return, the + * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value + * has been selected. + **/ +static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) +{ + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + u8 *mac_addr = pf->hw.mac.addr; + + pf->flags &= ~I40E_FLAG_PF_MAC; + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(mac_addr, addr); + pf->flags |= I40E_FLAG_PF_MAC; +#ifdef CONFIG_SPARC + } else { + ether_addr_copy(mac_addr, idprom->id_ethaddr); + pf->flags |= I40E_FLAG_PF_MAC; +#endif /* CONFIG_SPARC */ + } } /** @@ -10183,6 +10581,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 link_status; int err; u32 len; + u32 val; u32 i; u8 set_fc_aq_fail; @@ -10302,6 +10701,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&hw->aq.arq_mutex); err = i40e_init_adminq(hw); + if (err) { + if (err == I40E_ERR_FIRMWARE_API_VERSION) + dev_info(&pdev->dev, + "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); + else + dev_info(&pdev->dev, + "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); + + goto err_pf_reset; + } /* provide nvm, fw, api versions */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", @@ -10309,12 +10718,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw)); - if (err) { - dev_info(&pdev->dev, - "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); - goto err_pf_reset; - } - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) dev_info(&pdev->dev, @@ -10367,6 +10770,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } i40e_get_mac_addr(hw, hw->mac.addr); + /* allow a platform config to override the HW addr */ + i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -10411,7 +10816,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) + if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; @@ -10493,6 +10898,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* Reconfigure hardware for allowing smaller MSS in the case + * of TSO, so that we avoid the MDD being fired and causing + * a reset in the case of small MSS+TSO. + */ + val = rd32(hw, I40E_REG_MSS); + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { + val &= ~I40E_REG_MSS_MIN_MASK; + val |= I40E_64BYTE_MSS; + wr32(hw, I40E_REG_MSS, val); + } + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 635b3ac17877..720516b0e8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "Filter deleted for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, fd_data->pctype, fd_data->fd_id); } + if (err) + kfree(raw_packet); + return err ? -EOPNOTSUPP : 0; } @@ -322,7 +328,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * - * Always returns -EOPNOTSUPP + * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, @@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, } } + if (err) + kfree(raw_packet); + return err ? 
-EOPNOTSUPP : 0; } @@ -506,9 +515,6 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; } - } else { - dev_info(&pdev->dev, - "FD filter programming failed due to incorrect filter parameters\n"); } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) @@ -526,11 +532,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - + dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), @@ -542,6 +544,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } + + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); @@ -1374,7 +1380,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN traffic has an outer UDPv4 checksum we need to check + /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check * it in the driver, hardware does not do it for us. * Since L3L4P bit was set we assume a valid IHL value (>=5) * so the total length of IPv4 header is IHL*4 bytes @@ -1416,31 +1422,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1458,6 +1445,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (ring->netdev->features & NETIF_F_RXHASH) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed @@ -1606,8 +1617,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + 
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1632,7 +1643,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1736,8 +1746,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> @@ -1864,7 +1873,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1892,12 +1900,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) return 0; } + /* Clear hung_detected bit */ + clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected); /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1926,8 +1936,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40e_force_wb(vsi, q_vector); + } return budget; } @@ -1993,7 +2005,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); @@ -2078,7 +2090,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) dtype_cmd |= ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & @@ -2187,14 +2199,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_type_cmd_tso_mss: ptr to u64 object - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2247,7 +2257,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information - * @cd_type_cmd_tso_mss: ptr to u64 object + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2313,7 +2323,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, oudph = 
udp_hdr(skb); oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; @@ -2807,6 +2817,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, int tsyn; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -2826,8 +2839,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 6779fb771d6a..3f081e25e097 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -163,7 +163,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -202,6 +202,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index ae879826084b..3226946bf3d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 44462b40f2d7..63e62f9aec6e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -290,8 +290,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, next_q = find_first_bit(&linklistmap, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)); - vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; - qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); @@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); spin_lock_bh(&vsi->mac_filter_list_lock); - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); - if (!f) - dev_info(&pf->pdev->dev, - "Could not allocate VF MAC addr\n"); + if (is_valid_ether_addr(vf->default_lan_addr.addr)) { + f = i40e_add_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? 
vf->port_vlan_id : -1, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not add MAC filter %pM for VF %d\n", + vf->default_lan_addr.addr, vf->vf_id); + } f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); @@ -565,7 +568,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) } /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi, false); + ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -1094,8 +1097,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; - dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", - v_opcode, v_retval); + dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", + vf->vf_id, v_opcode, v_retval); if (vf->num_invalid_msgs > I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { dev_err(&pf->pdev->dev, @@ -1623,7 +1626,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!f) { dev_err(&pf->pdev->dev, - "Unable to add VF MAC filter\n"); + "Unable to add MAC filter %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; @@ -1632,8 +1636,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1669,8 +1675,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < al->num_elements; i++) { if (is_broadcast_ether_addr(al->list[i].addr) || is_zero_ether_addr(al->list[i].addr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", - al->list[i].addr); + dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", + al->list[i].addr, vf->vf_id); ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } @@ -1680,13 +1686,19 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - i40e_del_filter(vsi, al->list[i].addr, - I40E_VLAN_ANY, true, false); + if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + spin_unlock_bh(&vsi->mac_filter_list_lock); + goto error_param; + } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ - if (i40e_sync_vsi_filters(vsi, false)) - dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", + vf->vf_id, ret); error_param: /* send the response to the VF */ @@ -1740,8 +1752,8 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to add VF vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to add VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -1792,8 +1804,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) dev_err(&pf->pdev->dev, - "Unable to delete VF 
vlan filter %d, error %d\n", - vfl->vlan_id[i], ret); + "Unable to delete VLAN filter %d for VF %d, error %d\n", + vfl->vlan_id[i], vf->vf_id, ret); } error_param: @@ -2066,15 +2078,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, - "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } - if (!is_valid_ether_addr(mac)) { + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, - "Invalid VF ethernet address\n"); + "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); ret = -EINVAL; goto error_param; } @@ -2085,9 +2097,10 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_lock_bh(&vsi->mac_filter_list_lock); /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1, - true, false); + if (!is_zero_ether_addr(vf->default_lan_addr.addr)) + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); /* Delete all the filters for this VSI - we're going to kill it * anyway. @@ -2099,7 +2112,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ - if (i40e_sync_vsi_filters(vsi, false)) { + if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; @@ -2150,8 +2163,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_pvid; } @@ -2270,8 +2284,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error; } @@ -2344,8 +2359,9 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); - ret = -EINVAL; + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", + vf_id); + ret = -EAGAIN; goto error_param; } @@ -2460,6 +2476,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", + vf_id); + ret = -EAGAIN; + goto out; + } if (enable == vf->spoofchk) goto out; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index fcb9ef34cc7a..f5b2b369dc7c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -227,6 +227,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1888,6 +1889,26 @@ struct i40e_aqc_nvm_config_data_immediate_field { I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ + struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) @@ -2311,4 +2332,4 @@ struct i40e_aqc_debug_modify_internals { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); -#endif +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 72b1942a94aa..938783e0baac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -44,7 +44,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_A: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index e6a39c9862e8..ca8b58c3d1f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -30,7 +30,6 @@ /* Device IDs */ #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 47e9a90d6b10..7a00657dacda 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - + dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), @@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } + + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); @@ -127,17 +127,24 @@ void 
i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of + * i40evf_get_tx_pending - how many Tx descriptors not processed + * @tx_ring: the ring of descriptors * - * Returns value of Tx ring head based on value stored - * in head write-back location + * Since there is no access to the ring head register + * in XL710, we need to use our local copies **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +u32 i40evf_get_tx_pending(struct i40e_ring *ring) { - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); - return le32_to_cpu(*(volatile __le32 *)head); + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; } #define WB_STRIDE 0x3 @@ -245,16 +252,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - /* check to see if there are any non-cache aligned descriptors - * waiting to be written back, and kick the hardware to force - * them to be written back in case of napi polling - */ - if (budget && - !((i & WB_STRIDE) == WB_STRIDE) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), total_packets, total_bytes); @@ -414,7 +411,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) return false; } -/* +/** * i40evf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * @@ -889,31 +886,12 @@ checksum_fail: } /** - * i40e_rx_hash - returns the hash value from the Rx descriptor - * @ring: descriptor ring - * @rx_desc: specific descriptor - **/ -static inline u32 i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc) -{ - const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - - if ((ring->netdev->features & NETIF_F_RXHASH) && - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - else - return 0; -} - -/** - * i40e_ptype_to_hash - get a hash type + * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -931,6 +909,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** + * i40e_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline void i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) +{ + u32 hash; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if (ring->netdev->features & NETIF_F_RXHASH) + return; + + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + } +} + +/** * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to 
clean * @budget: how many cleans we're allowed @@ -1071,8 +1073,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1090,7 +1092,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) continue; } #endif - skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_desc->wb.qword1.status_error_len = 0; @@ -1189,8 +1190,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) continue; } - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), - i40e_ptype_to_hash(rx_ptype)); + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; @@ -1263,10 +1263,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, rx = i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { tx = i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } + if (rx || tx) { /* get the higher of the two ITR adjustments and * use the same value for both ITR registers @@ -1302,7 +1304,6 @@ enable_int: q_vector->itr_countdown--; else q_vector->itr_countdown = ITR_COUNTDOWN_START; - } /** @@ -1335,7 +1336,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb |= ring->arm_wb; + arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1364,8 +1365,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { tx_only: - if (arm_wb) + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40evf_force_wb(vsi, q_vector); + } return budget; } @@ -1437,13 +1440,12 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header - * @cd_tunneling: ptr to context descriptor bits + * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss, - u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1555,7 +1557,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_IPV6; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { @@ -1654,7 +1655,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } - /** +/** * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information @@ -1770,6 +1771,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; + u16 desc_count = 0; + bool tail_bump = true; + bool do_rs = false; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -1810,6 +1814,8 @@ 
static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1829,6 +1835,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; + desc_count++; + if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; @@ -1843,35 +1851,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - /* Place RS bit on last descriptor of any packet that spans across the - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. - */ -#define WB_STRIDE 0x3 - if (((i & WB_STRIDE) != WB_STRIDE) && - (first <= &tx_ring->tx_bi[i]) && - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << - I40E_TXD_QW1_CMD_SHIFT); - } else { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << - I40E_TXD_QW1_CMD_SHIFT); - } - - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index), - first->bytecount); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -1881,15 +1860,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* Algorithm to optimize tail and RS bit setting: + * if xmit_more is supported + * if xmit_more is true + * do not update tail and do not mark RS bit. + * if xmit_more is false and last xmit_more was false + * if every packet spanned less than 4 desc + * then set RS bit on 4th packet and update tail + * on every packet + * else + * update tail and set RS bit on every packet. + * if xmit_more is false and last_xmit_more was true + * update tail and set RS bit. + * + * Optimization: wmb to be issued only in case of tail update. + * Also optimize the Descriptor WB path for RS bit with the same + * algorithm. + * + * Note: If there are less than 4 packets + * pending and interrupts were disabled the service task will + * trigger a force WB. + */ + if (skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) { + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + tail_bump = false; + } else if (!skb->xmit_more && + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)) && + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && + (tx_ring->packet_stride < WB_STRIDE) && + (desc_count < WB_STRIDE)) { + tx_ring->packet_stride++; + } else { + tx_ring->packet_stride = 0; + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; + do_rs = true; + } + if (do_rs) + tx_ring->packet_stride = 0; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)(do_rs ? 
I40E_TXD_CMD : + I40E_TX_DESC_CMD_EOP) << + I40E_TXD_QW1_CMD_SHIFT); + /* notify HW of packet */ - if (!skb->xmit_more || - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->queue_index))) - writel(i, tx_ring->tail); - else + if (!tail_bump) prefetchw(tx_desc + 1); + if (tail_bump) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, tx_ring->tail); + } + return; dma_error: @@ -1961,6 +1997,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u8 hdr_len = 0; int tso; + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -1980,8 +2019,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, - &cd_type_cmd_tso_mss, &cd_tunneling); + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2029,7 +2067,7 @@ out_drop: netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; + struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index ebc1bf77f036..e29bb3e86cfd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -201,6 +201,7 @@ struct i40e_tx_queue_stats { u64 tx_busy; u64 tx_done_old; u64 tx_linearize; + u64 tx_force_wb; }; struct i40e_rx_queue_stats { @@ -267,6 +268,8 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u8 packet_stride; +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) @@ -321,4 +324,19 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); +u32 i40evf_get_tx_pending(struct i40e_ring *ring); + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: Tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 9f7b279b9d9c..3b9d2037456c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h 
b/drivers/net/ethernet/intel/i40evf/i40evf.h index 22fc3d49c4b9..be1b72b93888 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,6 +67,8 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -95,10 +97,10 @@ struct i40e_vsi { #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define MAX_RX_QUEUES 8 -#define MAX_TX_QUEUES MAX_RX_QUEUES +#define MAX_QUEUES 16 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. @@ -142,9 +144,6 @@ struct i40e_q_vector { #define OTHER_VECTOR 1 #define NONQ_VECS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 4 -#define MAX_MSIX_COUNT 5 - #define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) @@ -190,19 +189,19 @@ struct i40evf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work init_task; - struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; /* TX */ - struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *tx_rings; u32 tx_timeout_count; struct list_head mac_filter_list; u32 tx_desc_count; /* RX */ - struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; + struct i40e_ring *rx_rings; u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; @@ -313,4 +312,8 @@ void i40evf_request_reset(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, + u16 lut_size); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 4790437a50ac..a4c9feb589e7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -121,12 +121,12 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, data[i] = *(u64 *)p; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j]->stats.packets; - data[i++] = adapter->tx_rings[j]->stats.bytes; + data[i++] = adapter->tx_rings[j].stats.packets; + data[i++] = adapter->tx_rings[j].stats.bytes; } for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j]->stats.packets; - data[i++] = adapter->rx_rings[j]->stats.bytes; + data[i++] = adapter->rx_rings[j].stats.packets; + data[i++] = adapter->rx_rings[j].stats.bytes; } } @@ -351,7 +351,7 @@ static int i40evf_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { - q_vector = adapter->q_vector[i]; + q_vector = &adapter->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); q_vector->tx.itr = 
ITR_TO_REG(vsi->tx_itr_setting); @@ -634,25 +634,34 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL, *lut; + int ret; + u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; - if (indir) { - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; - } - } - return 0; + seed = key; + + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + if (ret) + goto out; + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + indir[i] = (u32)lut[i]; + +out: + kfree(lut); + + return ret; } /** @@ -668,9 +677,9 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - u32 hlut_val; - int i, j; + struct i40e_vsi *vsi = &adapter->vsi; + u8 *seed = NULL; + u16 i; /* We do not allow change in unsupported parameters */ if (key || @@ -679,15 +688,29 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = indir[j++]; - hlut_val |= indir[j++] << 8; - hlut_val |= indir[j++] << 16; - hlut_val |= indir[j++] << 24; - wr32(hw, I40E_VFQF_HLUT(i), hlut_val); + if (key) { + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; + } + memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; + } + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; } - return 0; + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + + return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, + I40EVF_HLUT_ARRAY_SIZE); } static const struct ethtool_ops i40evf_ethtool_ops = { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 99d2cffae0cd..a0bbb6b9f5aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,15 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.33" +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 4 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
\ + __stringify(DRV_VERSION_BUILD) \ + DRV_KERN const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -259,7 +267,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; - uint32_t dyn_ctl; + u32 dyn_ctl; if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); @@ -307,10 +315,9 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40e_hw *hw = &adapter->hw; u32 val; - /* handle non-queue interrupts */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); - + /* handle non-queue interrupts, these reads clear the registers */ + val = rd32(hw, I40E_VFINT_ICR01); + val = rd32(hw, I40E_VFINT_ICR0_ENA1); val = rd32(hw, I40E_VFINT_DYN_CTL01) | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; @@ -348,8 +355,8 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) static void i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@ -369,8 +376,8 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) static void i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; - struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@ -465,7 +472,7 @@ static void i40evf_netpoll(struct net_device *netdev) return; for (i = 0; i < q_vectors; i++) - i40evf_msix_clean_rings(0, adapter->q_vector[i]); + i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); } #endif @@ -487,7 +494,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = adapter->q_vector[vector]; + struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -532,7 +539,7 @@ free_queue_irqs: adapter->msix_entries[vector + NONQ_VECS].vector, NULL); free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, - adapter->q_vector[vector]); + &adapter->q_vectors[vector]); } return err; } @@ -582,7 +589,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) irq_set_affinity_hint(adapter->msix_entries[i+1].vector, NULL); free_irq(adapter->msix_entries[i+1].vector, - adapter->q_vector[i]); + &adapter->q_vectors[i]); } } @@ -611,7 +618,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) int i; for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); } /** @@ -656,8 +663,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) } for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i]->rx_buf_len = rx_buf_len; + adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + 
adapter->rx_rings[i].rx_buf_len = rx_buf_len; } } @@ -954,7 +961,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; napi_enable(napi); } @@ -971,7 +978,7 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; napi_disable(&q_vector->napi); } } @@ -992,7 +999,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = adapter->rx_rings[i]; + struct i40e_ring *ring = &adapter->rx_rings[i]; i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; @@ -1112,16 +1119,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) **/ static void i40evf_free_queues(struct i40evf_adapter *adapter) { - int i; - if (!adapter->vsi_res) return; - for (i = 0; i < adapter->num_active_queues; i++) { - if (adapter->tx_rings[i]) - kfree_rcu(adapter->tx_rings[i], rcu); - adapter->tx_rings[i] = NULL; - adapter->rx_rings[i] = NULL; - } + kfree(adapter->tx_rings); + kfree(adapter->rx_rings); } /** @@ -1136,13 +1137,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; + adapter->tx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + adapter->rx_rings = kcalloc(adapter->num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); - if (!tx_ring) - goto err_out; + tx_ring = &adapter->tx_rings[i]; tx_ring->queue_index = i; tx_ring->netdev = adapter->netdev; @@ -1150,14 +1158,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->count = adapter->tx_desc_count; if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; - adapter->tx_rings[i] = tx_ring; - rx_ring = &tx_ring[1]; + rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - adapter->rx_rings[i] = rx_ring; } return 0; @@ -1207,115 +1213,273 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = i40evf_acquire_msix_vectors(adapter, v_budget); out: - adapter->netdev->real_num_tx_queues = pairs; + netif_set_real_num_rx_queues(adapter->netdev, pairs); + netif_set_real_num_tx_queues(adapter->netdev, pairs); return err; } /** - * i40e_configure_rss_aq - Prepare for RSS using AQ commands + * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { - struct i40e_aqc_get_set_rss_key_data rss_key; struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - int 
ret = 0, i; - u8 *rss_lut; + int ret = 0; if (!vsi->id) - return; + return -EINVAL; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", adapter->current_op); - return; + return -EBUSY; } - memset(&rss_key, 0, sizeof(rss_key)); - memcpy(&rss_key, seed, sizeof(rss_key)); + if (seed) { + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); - if (!rss_lut) - return; + if (lut) { + ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } - /* Populate the LUT with max no. PF queues in round robin fashion */ - for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) - rss_lut[i] = i % adapter->num_active_queues; + return ret; +} - ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return; +/** + * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); } - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, - (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); - if (ret) - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); + } + i40e_flush(hw); + + return 0; } /** - * i40e_configure_rss_reg - Prepare for RSS if used - * @adapter: board private structure - * @seed: RSS hash seed + * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter, - const u8 *seed) +static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) { + struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; - u32 *seed_dw = (u32 *)seed; - u32 cqueue = 0; - u32 lut = 0; - int i, j; + int ret = 0; - /* Fill out hash function seed */ - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - - /* Populate the LUT with max no. 
PF queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->num_active_queues) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; + if (seed) { + ret = i40evf_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; } - wr32(hw, I40E_VFQF_HLUT(i), lut); } - i40e_flush(hw); + + if (lut) { + ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } + + return ret; } /** - * i40evf_configure_rss - Prepare for RSS + * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, + const u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + u16 i; + + if (seed) { + u32 *seed_dw = (u32 *)seed; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); + } + + if (lut) { + u32 *lut_dw = (u32 *)lut; + + if (lut_size != I40EVF_HLUT_ARRAY_SIZE) + return -EINVAL; + + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); + } + + return 0; +} + +/** + * i40evf_config_rss - Configure RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_config_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_config_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_get_rss - Get RSS keys and lut + * @vsi: Pointer to vsi structure + * @seed: RSS hash seed + * @lut: Lookup table + * @lut_size: Lookup table size + * + * Returns 0 on success, negative on failure + **/ +int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) +{ + struct i40evf_adapter *adapter = vsi->back; + + if (RSS_AQ(adapter)) + return i40evf_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40evf_get_rss_reg(vsi, seed, lut, lut_size); +} + +/** + * i40evf_fill_rss_lut - Fill the lut with default values + * @lut: Lookup table to be filled with + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + **/ +static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +} + +/** + * i40evf_init_rss - Prepare for RSS * @adapter: board private structure + * + * Return 0 on success, negative on failure **/ -static void i40evf_configure_rss(struct i40evf_adapter *adapter) +static int i40evf_init_rss(struct i40evf_adapter *adapter) { + struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; u8 seed[I40EVF_HKEY_ARRAY_SIZE]; u64 hena; - - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + u8 *lut; + int ret; /* Enable PCTYPES 
for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - if (RSS_AQ(adapter)) - i40evf_configure_rss_aq(&adapter->vsi, seed); + lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use user configured lut if there is one, otherwise use default */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); + else + i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE, + adapter->num_active_queues); + + /* Use user configured hash key if there is one, otherwise + * user default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); else - i40evf_configure_rss_reg(adapter, seed); + netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); + kfree(lut); + + return ret; } /** @@ -1327,21 +1491,22 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) **/ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) { - int q_idx, num_q_vectors; + int q_idx = 0, num_q_vectors; struct i40e_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), + GFP_KERNEL); + if (!adapter->q_vectors) + goto err_out; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; + q_vector = &adapter->q_vectors[q_idx]; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, i40evf_napi_poll, NAPI_POLL_WEIGHT); - adapter->q_vector[q_idx] = q_vector; } return 0; @@ -1349,11 +1514,10 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) err_out: while (q_idx) { q_idx--; - q_vector = adapter->q_vector[q_idx]; + q_vector = &adapter->q_vectors[q_idx]; netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; } + kfree(adapter->q_vectors); return -ENOMEM; } @@ -1374,13 +1538,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; - - adapter->q_vector[q_idx] = NULL; + struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); - kfree(q_vector); } + kfree(adapter->q_vectors); } /** @@ -1439,6 +1601,22 @@ err_set_interrupt: } /** + * i40evf_clear_rss_config_user - Clear user configurations of RSS + * @vsi: Pointer to VSI structure + **/ +static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +{ + if (!vsi) + return; + + kfree(vsi->rss_hkey_user); + vsi->rss_hkey_user = NULL; + + kfree(vsi->rss_lut_user); + vsi->rss_lut_user = NULL; +} + +/** * i40evf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long **/ @@ -1565,7 +1743,7 @@ static void i40evf_watchdog_task(struct work_struct *work) * PF, so we don't have to set current_op as we will * not get a response through the ARQ. 
*/ - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } @@ -1864,9 +2042,12 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->tx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->tx_rings[i]->desc) - i40evf_free_tx_resources(adapter->tx_rings[i]); + if (adapter->tx_rings[i].desc) + i40evf_free_tx_resources(&adapter->tx_rings[i]); } /** @@ -1884,8 +2065,8 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->tx_rings[i]->count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1911,8 +2092,8 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i]->count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -1932,9 +2113,12 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; + if (!adapter->rx_rings) + return; + for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->rx_rings[i]->desc) - i40evf_free_rx_resources(adapter->rx_rings[i]); + if (adapter->rx_rings[i].desc) + i40evf_free_rx_resources(&adapter->rx_rings[i]); } /** @@ -2137,7 +2321,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) netdev->features |= NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | @@ -2263,6 +2447,14 @@ static void i40evf_init_task(struct work_struct *work) if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { err = i40evf_send_vf_config_msg(adapter); goto err; + } else if (err == I40E_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
+ */ + i40evf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", @@ -2313,7 +2505,7 @@ static void i40evf_init_task(struct work_struct *work) I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; if (!RSS_AQ(adapter)) - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2334,7 +2526,6 @@ static void i40evf_init_task(struct work_struct *work) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - dev_info(&pdev->dev, "%s\n", i40evf_driver_string); adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); @@ -2343,7 +2534,7 @@ static void i40evf_init_task(struct work_struct *work) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - i40evf_configure_rss(adapter); + i40evf_init_rss(adapter); } return; restart: @@ -2438,8 +2629,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - MAX_TX_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -2632,6 +2822,9 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); + /* Clear user configurations for RSS */ + i40evf_clear_rss_config_user(&adapter->vsi); + if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 32e620e1eb5c..c1c526283757 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -157,7 +157,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) @@ -242,7 +244,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); - vqci = kzalloc(len, GFP_ATOMIC); + vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -255,19 +257,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) for (i = 0; i < pairs; i++) { vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; - vqpi->txq.ring_len = adapter->tx_rings[i]->count; - vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; + vqpi->txq.ring_len = adapter->tx_rings[i].count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; vqpi->txq.headwb_enabled = 1; vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = adapter->rx_rings[i]->count; - vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; + vqpi->rxq.ring_len = adapter->rx_rings[i].count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = 
adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; - vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; + vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; vqpi++; } @@ -353,14 +355,14 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) len = sizeof(struct i40e_virtchnl_irq_map_info) + (adapter->num_msix_vectors * sizeof(struct i40e_virtchnl_vector_map)); - vimi = kzalloc(len, GFP_ATOMIC); + vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; vimi->num_vectors = adapter->num_msix_vectors; /* Queue vectors first */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vector[v_idx]; + q_vector = adapter->q_vectors + v_idx; vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; @@ -391,6 +393,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; struct i40evf_mac_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -415,10 +418,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -431,7 +436,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -450,6 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) struct i40e_virtchnl_ether_addr_list *veal; struct i40evf_mac_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -474,9 +481,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + more = true; } - veal = kzalloc(len, GFP_ATOMIC); + veal = kzalloc(len, GFP_KERNEL); if (!veal) return; @@ -490,7 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)veal, len); kfree(veal); @@ -509,6 +519,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct i40evf_vlan_filter *f; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -534,9 +545,11 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct 
i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -549,7 +562,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) f->add = false; } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -567,6 +581,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) struct i40e_virtchnl_vlan_filter_list *vvfl; struct i40evf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; + bool more = false; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -592,9 +607,11 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); - len = I40EVF_MAX_AQ_BUF_SIZE; + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; } - vvfl = kzalloc(len, GFP_ATOMIC); + vvfl = kzalloc(len, GFP_KERNEL); if (!vvfl) return; @@ -608,7 +625,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) kfree(f); } } - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -724,9 +742,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, i40evf_stat_str(&adapter->hw, v_retval), - v_opcode); + switch (v_opcode) { + case I40E_VIRTCHNL_OP_ADD_VLAN: + dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, + i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); + } } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 7a73510e547c..adb33e2a0137 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -45,8 +45,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); -static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); -static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); @@ -205,13 +203,10 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case e1000_82580: case 
e1000_i350: case e1000_i354: - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; - break; case e1000_i210: case e1000_i211: - phy->ops.read_reg = igb_read_phy_reg_gs40g; - phy->ops.write_reg = igb_write_phy_reg_gs40g; + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; break; default: phy->ops.read_reg = igb_read_phy_reg_igp; @@ -272,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) if (ret_val) goto out; } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) case I210_I_PHY_ID: phy->type = e1000_phy_i210; phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; phy->ops.get_phy_info = igb_get_phy_info_m88; phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; @@ -925,6 +926,8 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) if (phy->id == M88E1512_E_PHY_ID) ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); out: return ret_val; } @@ -2145,7 +2148,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ -static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; @@ -2169,7 +2172,7 @@ out: * * Writes data to MDI control register in the PHY at offset. 
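Back in the i40evf_virtchnl.c hunks above, the MAC and VLAN filter messages are now sized to exactly what fits in one admin-queue buffer, and when the pending filter list is larger the relevant I40EVF_FLAG_AQ_* bit is left set so a later pass sends the remainder. A minimal sketch of that batching decision (header, element, and buffer-size names are illustrative):

#include <linux/slab.h>

#define EXAMPLE_MAX_BUF_SIZE	4096	/* stand-in for I40EVF_MAX_AQ_BUF_SIZE */

struct example_list_hdr { u16 vsi_id; u16 num_elements; };
struct example_elem { u8 addr[6]; };

/* Sketch: clamp the element count to one message and report whether a
 * follow-up message is still required for the rest of the list.
 */
static void *example_build_msg(int pending, int *count, size_t *len, bool *more)
{
	*count = pending;
	*more = false;
	*len = sizeof(struct example_list_hdr) +
	       *count * sizeof(struct example_elem);

	if (*len > EXAMPLE_MAX_BUF_SIZE) {
		*count = (EXAMPLE_MAX_BUF_SIZE - sizeof(struct example_list_hdr)) /
			 sizeof(struct example_elem);
		*len = sizeof(struct example_list_hdr) +
		       *count * sizeof(struct example_elem);
		*more = true;	/* leave the AQ_* flag set; send the rest later */
	}

	return kzalloc(*len, GFP_KERNEL);	/* process context, hence GFP_KERNEL */
}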
**/ -static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index b1915043bc0c..c3c598c347a9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -927,7 +927,10 @@ /* Intel i347-AT4 Registers */ -#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ #define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ #define I347AT4_PAGE_SELECT 0x16 @@ -990,6 +993,7 @@ #define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define E1000_M88E1543_EEE_CTRL_1 0x0 #define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2003b3756ba2..4034207eb5cc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -441,6 +441,7 @@ struct e1000_phy_info { u16 cable_length; u16 max_cable_length; u16 min_cable_length; + u16 pair_length[4]; u8 mdix; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 65d931669f81..8aa798737d4d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -861,10 +861,10 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) if (ret_val) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ - igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | - E1000_PHY_PLL_FREQ_REG), &phy_word); + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); if ((phy_word & E1000_PHY_PLL_UNCONF) != E1000_PHY_PLL_UNCONF) { ret_val = 0; @@ -896,7 +896,35 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) /* restore WUC register */ wr32(E1000_WUC, wuc); } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); /* restore MDICNFG setting */ wr32(E1000_MDICNFG, mdicnfg); return ret_val; } + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
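The i210 PLL workaround above now selects the PHY page explicitly through I347AT4_PAGE_SELECT and then uses the plain 82580 MDIC accessors, instead of the removed GS40G helpers that encoded the page in the upper half of the register offset. A minimal sketch of the select/access/restore sequence, assuming the igb e1000 headers are in scope:

/* Sketch: paged PHY access as used by the reworked PLL workaround above.
 * Assumes e1000_hw.h/e1000_phy.h declarations are available.
 */
static s32 example_read_paged(struct e1000_hw *hw, u16 page, u32 reg, u16 *val)
{
	s32 ret;

	ret = igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, page);
	if (ret)
		return ret;

	ret = igb_read_phy_reg_82580(hw, reg, val);

	/* restore page 0 so later unpaged accesses hit the default page */
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);

	return ret;
}

This is also why E1000_PHY_PLL_FREQ_PAGE shrinks from 0xFC0000 to 0xFC: the value is now written to the page-select register directly rather than being shifted out of a combined page/offset word.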
+ **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 3442b6357d01..b2964a2a60b1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -84,7 +85,7 @@ enum E1000_INVM_STRUCTURE_TYPE { #define E1000_PCI_PMCSR_D3 0x03 #define E1000_MAX_PLL_TRIES 5 #define E1000_PHY_PLL_UNCONF 0xFF -#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_PAGE 0xFC #define E1000_PHY_PLL_FREQ_REG 0x000E #define E1000_INVM_DEFAULT_AL 0x202F #define E1000_INVM_AUTOLOAD 0x0A diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 23ec28f43f6d..5b54254aed4f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1717,59 +1717,76 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: case I210_I_PHY_ID: - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - (I347AT4_PCDL + phy->addr), - &phy_data); + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); if (ret_val) - return ret_val; + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + - I347AT4_PCDC, &phy_data2); + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); if (ret_val) - return ret_val; + goto out; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); - /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); - break; - case M88E1543_E_PHY_ID: - case M88E1512_E_PHY_ID: - case I347AT4_E_PHY_ID: - /* Remember the original page select and set it to 7 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); if (ret_val) goto out; - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + phy->pair_length[0] = phy_data / (is_cm ? 
100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); if (ret_val) goto out; - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), - &phy_data); + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); if (ret_val) goto out; - /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); if (ret_val) goto out; - is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + phy->pair_length[3] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; /* Reset the page selec to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, @@ -2278,6 +2295,100 @@ out: } /** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure * @@ -2494,66 +2605,6 @@ out: } /** - * igb_write_phy_reg_gs40g - Write GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to write to - * upper half is page to use. - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_write_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * igb_read_phy_reg_gs40g - Read GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to read to - * upper half is page to use. - * @data: data to read at register offset - * - * Acquires semaphore, if necessary, then reads the data in the PHY register - * at the offset. Release any acquired semaphores before exiting. 
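In the igb_get_cable_length_m88_gen2() hunk above, all four pair-length registers (I347AT4_PCDL0..I347AT4_PCDL3) are read and the PHY structure is populated with the per-pair values plus their minimum, maximum, and average. A short self-contained sketch of that aggregation over pair lengths already converted to meters:

#include <linux/types.h>

/* Sketch: derive min/max and the averaged cable_length from four pairs. */
static void example_cable_length(const u16 pair[4],
				 u16 *len_min, u16 *len_max, u16 *len_avg)
{
	int i, total = 0;

	*len_min = pair[0];
	*len_max = pair[0];

	for (i = 0; i < 4; i++) {
		total += pair[i];
		if (pair[i] < *len_min)
			*len_min = pair[i];
		if (pair[i] > *len_max)
			*len_max = pair[i];
	}

	*len_avg = total / 4;	/* matches phy->cable_length = len_tot / 4 */
}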
- **/ -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; - ret_val = igb_read_phy_reg_mdic(hw, offset, data); - -release: - hw->phy.ops.release(hw); - return ret_val; -} - -/** * igb_set_master_slave_mode - Setup PHY for Master/slave mode * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 24d55edbb0e3..969a6ddafa3b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -62,6 +62,7 @@ void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); @@ -71,8 +72,8 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); s32 igb_get_cable_length_82580(struct e1000_hw *hw); -s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_check_polarity_m88(struct e1000_hw *hw); /* IGP01E1000 Specific Registers */ @@ -143,17 +144,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define E1000_CABLE_LENGTH_UNDEFINED 0xFF -/* GS40G - I210 PHY defines */ -#define GS40G_PAGE_SELECT 0x16 -#define GS40G_PAGE_SHIFT 16 -#define GS40G_OFFSET_MASK 0xFFFF -#define GS40G_PAGE_2 0x20000 -#define GS40G_MAC_REG2 0x15 -#define GS40G_MAC_LB 0x4140 -#define GS40G_MAC_SPEED_1G 0X0006 -#define GS40G_COPPER_SPEC 0x0010 -#define GS40G_LINE_LB 0x4000 - /* SFP modules ID memory locations */ #define E1000_SFF_IDENTIFIER_OFFSET 0x00 #define E1000_SFF_IDENTIFIER_SFF 0x02 diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 4af2870e49f8..21d9d02885cb 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -66,6 +66,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ @@ -385,8 +386,7 @@ do { \ #define array_wr32(reg, offset, value) \ wr32((reg) + ((offset) << 2), (value)) -#define array_rd32(reg, offset) \ - (readl(hw->hw_addr + reg + ((offset) << 2))) +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) /* DMA Coalescing registers */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ diff --git a/drivers/net/ethernet/intel/igb/igb.h 
b/drivers/net/ethernet/intel/igb/igb.h index 1a2f1cc44b28..e3cb93bdb21a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -389,6 +389,8 @@ struct igb_adapter { u16 link_speed; u16 link_duplex; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + struct work_struct reset_task; struct work_struct watchdog_task; bool fc_autoneg; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2529bc625de4..1d329f1d047b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -127,10 +127,20 @@ static const struct igb_stats igb_gstrings_net_stats[] = { #define IGB_STATS_LEN \ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" }; #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) @@ -2002,7 +2012,7 @@ static void igb_diag_test(struct net_device *netdev, /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ - if (igb_link_test(adapter, &data[4])) + if (igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) @@ -2011,21 +2021,21 @@ static void igb_diag_test(struct net_device *netdev, else igb_reset(adapter); - if (igb_reg_test(adapter, &data[0])) + if (igb_reg_test(adapter, &data[TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_eeprom_test(adapter, &data[1])) + if (igb_eeprom_test(adapter, &data[TEST_EEP])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); - if (igb_intr_test(adapter, &data[2])) + if (igb_intr_test(adapter, &data[TEST_IRQ])) eth_test->flags |= ETH_TEST_FL_FAILED; igb_reset(adapter); /* power up link for loopback test */ igb_power_up_link(adapter); - if (igb_loopback_test(adapter, &data[3])) + if (igb_loopback_test(adapter, &data[TEST_LOOP])) eth_test->flags |= ETH_TEST_FL_FAILED; /* restore speed, duplex, autoneg settings */ @@ -2045,16 +2055,16 @@ static void igb_diag_test(struct net_device *netdev, dev_info(&adapter->pdev->dev, "online testing starting\n"); /* PHY is powered down when interface is down */ - if (if_running && igb_link_test(adapter, &data[4])) + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; else - data[4] = 0; + data[TEST_LINK] = 0; /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; clear_bit(__IGB_TESTING, &adapter->state); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea7b09887245..31e5f3942839 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -946,7 +946,6 @@ static void igb_configure_msix(struct igb_adapter *adapter) static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = 
&adapter->hw; int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, @@ -959,7 +958,7 @@ static int igb_request_msix(struct igb_adapter *adapter) vector++; - q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); if (q_vector->rx.ring && q_vector->tx.ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, @@ -1230,7 +1229,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, q_vector->tx.work_limit = adapter->tx_work_limit; /* initialize ITR configuration */ - q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); q_vector->itr_val = IGB_START_ITR; /* initialize pointer to rings */ @@ -2294,9 +2293,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); err = -EIO; - hw->hw_addr = pci_iomap(pdev, 0, 0); - if (!hw->hw_addr) + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igb_netdev_ops; igb_set_ethtool_ops(netdev); @@ -2378,8 +2379,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CSUM; - netdev->features |= NETIF_F_SCTP_CSUM; + netdev->hw_features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_SCTP_CRC; } netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2656,7 +2657,7 @@ err_sw_init: #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -2823,7 +2824,7 @@ static void igb_remove(struct pci_dev *pdev) igb_clear_interrupt_scheme(adapter); - pci_iounmap(pdev, hw->hw_addr); + pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, @@ -2856,6 +2857,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter) if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. 
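The igb hunks above introduce adapter->io_addr solely so the BAR mapping can be torn down reliably: hw->hw_addr can be rewritten at runtime (the probe comment notes it "can be altered"), so the original pci_iomap() return value is kept aside and pci_iounmap() is called on that copy. A minimal sketch of the pattern with illustrative field names:

#include <linux/errno.h>
#include <linux/pci.h>

struct example_adapter {
	u8 __iomem *io_addr;	/* original mapping; only used for unmap */
	u8 __iomem *hw_addr;	/* working pointer; may change at runtime */
};

/* Sketch: remember the pci_iomap() result in a field that is never rewritten. */
static int example_map(struct pci_dev *pdev, struct example_adapter *adapter)
{
	adapter->io_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->io_addr)
		return -EIO;

	adapter->hw_addr = adapter->io_addr;	/* runtime code uses hw_addr */
	return 0;
}

static void example_unmap(struct pci_dev *pdev, struct example_adapter *adapter)
{
	pci_iounmap(pdev, adapter->io_addr);	/* valid even if hw_addr changed */
}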
+ */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + pci_sriov_set_totalvfs(pdev, 7); igb_enable_sriov(pdev, max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 1d2174526a4c..4b9156cd8b93 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -139,6 +139,7 @@ enum ixgbe_tx_flags { #define IXGBE_X540_VF_DEVICE_ID 0x1515 struct vf_data_storage { + struct pci_dev *vfdev; unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; @@ -224,6 +225,8 @@ struct ixgbe_rx_queue_stats { u64 csum_err; }; +#define IXGBE_TS_HDR_LEN 8 + enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, @@ -282,6 +285,8 @@ struct ixgbe_ring { u16 next_to_use; u16 next_to_clean; + unsigned long last_rx_timestamp; + union { u16 next_to_alloc; struct { @@ -312,7 +317,7 @@ enum ixgbe_ring_f_enum { }; #define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_RSS_INDICES_X550 63 #define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ #define IXGBE_MAX_FCOE_INDICES 8 @@ -587,9 +592,10 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) struct ixgbe_mac_addr { u8 addr[ETH_ALEN]; - u16 queue; + u16 pool; u16 state; /* bitmask */ }; + #define IXGBE_MAC_STATE_DEFAULT 0x1 #define IXGBE_MAC_STATE_MODIFIED 0x2 #define IXGBE_MAC_STATE_IN_USE 0x4 @@ -639,6 +645,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) #define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) @@ -656,6 +664,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_VXLAN #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #endif +#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) /* Tx fast path data */ int num_tx_queues; @@ -755,9 +764,12 @@ struct ixgbe_adapter { unsigned long last_rx_ptp_check; unsigned long last_rx_timestamp; spinlock_t tmreg_lock; - struct cyclecounter cc; - struct timecounter tc; + struct cyclecounter hw_cc; + struct timecounter hw_tc; u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp)(struct ixgbe_adapter *); /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -883,9 +895,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 queue); + const u8 *addr, u16 queue); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, struct ixgbe_ring *); @@ -968,12 +981,33 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); +void 
ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { + ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); + return; + } + + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; +} + int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 65db69b862fb..d8a9fb8a59e2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -765,13 +765,14 @@ mac_reset_top: ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; @@ -879,11 +880,12 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. 
**/ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index a39afcf03e2c..fa8d4f40ac2a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -990,13 +990,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -1082,12 +1083,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ce61b36b94f1..64045053e874 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1884,10 +1884,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; @@ -2454,6 +2455,17 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + /* Poll for bit to read as set */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) + break; + usleep_range(100, 120); + } + if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { + hw_dbg(hw, "GIO disable did not set - requesting resets\n"); + goto gio_disable_fail; + } + /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || ixgbe_removed(hw->hw_addr)) @@ -2475,6 +2487,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * again to clear out any effects they may have had on our device. 
*/ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); +gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) @@ -2987,43 +3000,44 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { - u32 bits = 0; - u32 first_empty_slot = 0; - s32 regindex; + s32 regindex, first_empty_slot; + u32 bits; /* short cut the special case */ if (vlan == 0) return 0; - /* - * Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (!bits && !(first_empty_slot)) + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; } - /* - * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan - * in the VLVF. Else use the first empty VLVF register for this - * vlan id. - */ - if (regindex >= IXGBE_VLVF_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else { - hw_dbg(hw, "No space in VLVF.\n"); - regindex = IXGBE_ERR_NO_SPACE; - } - } + /* If we are here then we didn't find the VLAN. Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + hw_dbg(hw, "No space in VLVF.\n"); - return regindex; + return first_empty_slot ? : IXGBE_ERR_NO_SPACE; } /** @@ -3032,21 +3046,17 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFVFB * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. 
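The rewritten ixgbe_set_vfta_generic() that follows drops the old shift/mask bookkeeping: the VFTA is an array of 32-bit registers, so a VLAN id selects register vlan / 32 and bit vlan % 32 (for example VLAN 100 lands in bit 4 of VFTA[3]), and the update is expressed as an XOR delta that is non-zero only when the bit actually needs to change. A small sketch of just that arithmetic:

#include <linux/types.h>

/* Sketch: compute the new VFTA[vlan / 32] value using the XOR-delta trick. */
static u32 example_vfta_update(u32 vfta, u32 vlan, bool vlan_on)
{
	u32 vfta_delta = 1u << (vlan % 32);

	/* keep the bit in the delta only if its state differs from vlan_on */
	vfta_delta &= vlan_on ? ~vfta : vfta;

	return vfta ^ vfta_delta;
}

Writing the register only when the delta is non-zero is what lets the vlvf_update/vfta_update labels in the hunk below skip redundant VFTA writes.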
**/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { - s32 regindex; - u32 bitindex; - u32 vfta; - u32 bits; - u32 vt; - u32 targetbit; - bool vfta_changed = false; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - if (vlan > 4095) + if ((vlan > 4095) || (vind > 63)) return IXGBE_ERR_PARAM; /* @@ -3061,22 +3071,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[11-5]: which register * bits[4-0]: which bit in the register */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; /* Part 2 * If VT Mode is set @@ -3086,85 +3090,67 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * Or !vlan_on * clear the pool bit and possibly the vind */ - vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - if (vt & IXGBE_VT_CTL_VT_ENABLE) { - s32 vlvf_index; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); - if (vlvf_index < 0) - return vlvf_index; - - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits |= (1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits |= (1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits &= ~(1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits &= ~(1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - } - } + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + goto vfta_update; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } - /* - * If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. - * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. 
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= 1 << (vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool */ - if (bits) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), - (IXGBE_VLVF_VIEN | vlan)); - if (!vlan_on) { - /* someone wants to clear the vfta entry - * but some pools/VFs are still using it. - * Ignore it. */ - vfta_changed = false; - } - } else { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); - } + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return 0; } - if (vfta_changed) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return 0; } @@ -3184,8 +3170,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index a0044e4a8b90..2b9563137fd8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -92,7 +92,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on, bool vlvf_bypass); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index a507a6fe3624..02c7333a9c83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -139,6 +139,11 @@ s32 
ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ @@ -149,7 +154,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ - if (credit_max && (credit_max < min_credit)) + if (credit_max < min_credit) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 23277ab153b6..b5cc989a3d23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -223,13 +223,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 supports per TC Rx priority flow control. So - * clear all TCs and only enable those that should be + * X540 & X550 supports per TC Rx priority flow control. + * So clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - if (hw->mac.type == ixgbe_mac_X540) + if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d681273bd39d..bea96b3bc90c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,47 +151,70 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +/* currently supported speeds for 10G */ +#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) + +static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) + return SUPPORTED_10000baseT_Full; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + return SUPPORTED_10000baseKX4_Full; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + return SUPPORTED_10000baseKR_Full; + default: + return SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full; + } +} + static int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; - u32 link_speed = 0; bool autoneg = false; - bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; + 
ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - ecmd->advertising |= ADVERTISED_2500baseX_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } } else { - /* default modes in case phy.autoneg_advertised isn't set */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising = ADVERTISED_10000baseT_Full; @@ -229,6 +252,10 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { case ixgbe_sfp_type_da_cu: @@ -284,9 +311,8 @@ static int ixgbe_get_settings(struct net_device *netdev, break; } - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) { - switch (link_speed) { + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 631c603fc966..2a653ec954f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -77,7 +77,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!netdev) return 0; - if (xid >= IXGBE_FCOE_DDP_MAX) + if (xid >= netdev->fcoe_ddp_xid) return 0; adapter = netdev_priv(netdev); @@ -177,7 +177,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; adapter = netdev_priv(netdev); - if (xid >= IXGBE_FCOE_DDP_MAX) { + if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -517,6 +517,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { @@ -593,6 +594,8 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; first->tx_flags |= IXGBE_TX_FLAGS_TSO; + /* Hardware expects L4T to be RSV for FCoE TSO */ + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; } /* 
set flag indicating FCOE to ixgbe_tx_map call */ @@ -610,7 +613,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + type_tucmd, mss_l4len_idx); return 0; } @@ -620,8 +623,7 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) struct ixgbe_fcoe_ddp_pool *ddp_pool; ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - if (ddp_pool->pool) - dma_pool_destroy(ddp_pool->pool); + dma_pool_destroy(ddp_pool->pool); ddp_pool->pool = NULL; } @@ -997,8 +999,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, return -EINVAL; /* Don't return information on unsupported devices */ - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; /* Manufacturer */ @@ -1044,6 +1045,10 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, snprintf(info->model, sizeof(info->model), "Intel 82599"); + } else if (hw->mac.type == ixgbe_mac_X550) { + snprintf(info->model, + sizeof(info->model), + "Intel X550"); } else { snprintf(info->model, sizeof(info->model), diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f3168bcc7d87..e771e764daa3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -844,7 +844,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); - napi_hash_add(&q_vector->napi); #ifdef CONFIG_NET_RX_BUSY_POLL /* initialize busy poll */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index aed8d029b23d..c4003a88bbf6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -65,9 +65,6 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" -#ifdef CONFIG_IXGBE_VXLAN -#include <net/vxlan.h> -#endif char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -175,6 +172,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *ixgbe_wq; + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, @@ -316,7 +315,7 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) if (!test_bit(__IXGBE_DOWN, &adapter->state) && !test_bit(__IXGBE_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbe_wq, &adapter->service_task); } static void ixgbe_remove_adapter(struct ixgbe_hw *hw) @@ -1484,7 +1483,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { - ring->rx_stats.csum_err++; + skb->ip_summed = CHECKSUM_NONE; return; } /* If we checked the outer header let the stack know */ @@ -1635,6 +1634,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; + u32 flags = rx_ring->q_vector->adapter->flags; ixgbe_update_rsc_stats(rx_ring, skb); @@ -1642,8 +1642,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, 
rx_desc, skb); - if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -1659,6 +1659,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { + skb_mark_napi_id(skb, &q_vector->napi); if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); else @@ -2123,7 +2124,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ - skb_mark_napi_id(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -2741,7 +2741,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2757,7 +2757,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); - if (!ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2947,10 +2948,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter, eicr); + ixgbe_ptp_check_pps_event(adapter); /* would disable interrupts here but EIAM disabled it */ - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); /* * re-enable link(maybe) and non-queue interrupts, no flush. @@ -3315,8 +3316,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, } /** - * Return a number of entries in the RSS indirection table - * + * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries * @adapter: device handle * * - 82598/82599/X540: 128 @@ -3334,8 +3334,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW - * + * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. @@ -3374,8 +3373,7 @@ void ixgbe_store_reta(struct ixgbe_adapter *adapter) } /** - * Write the RETA table to HW (for x550 devices in SRIOV mode) - * + * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
@@ -3621,6 +3619,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); + /* Force flushing of IXGBE_RDLEN to prevent MDD */ + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); @@ -3704,6 +3705,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + /* * Set up VF register offsets for selected VT Mode, * i.e. 32 or 64 VFs for SR-IOV @@ -3901,12 +3905,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); set_bit(vid, adapter->active_vlans); return 0; } +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (idx = IXGBE_VLVF_ENTRIES; --idx;) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, word; + int idx; + + idx = ixgbe_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + word = idx * 2 + (VMDQ_P(0) / 32); + bits = ~(1 << (VMDQ_P(0)) % 32); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* Disable the filter so this falls into the default pool. 
*/ + if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); + } +} + static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { @@ -3914,7 +3962,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + else + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + clear_bit(vid, adapter->active_vlans); return 0; @@ -3992,6 +4044,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) } } +static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + /* legacy case, we can just disable VLAN filtering */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); + u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); + + vlvfb |= 1 << (VMDQ_P(0) % 32); + IXGBE_WRITE_REG(hw, reg_offset, vlvfb); + } + + /* Set all bits in the VLAN filter table array */ + for (i = hw->mac.vft_size; i--;) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); +} + +#define VFTA_BLOCK_SIZE 8 +static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits; + + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern outselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + word = i * 2 + VMDQ_P(0) / 32; + bits = ~(1 << (VMDQ_P(0) % 32)); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); + } + + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); + } +} + +static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case 
ixgbe_mac_X550EM_x: + default: + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + /* fall through */ + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + + for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) + ixgbe_scrub_vfta(adapter, i); +} + static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { u16 vid; @@ -4034,124 +4209,156 @@ static int ixgbe_write_mc_addr_list(struct net_device *netdev) #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, - adapter->mac_table[i].queue, + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, IXGBE_RAH_AV); else hw->mac.ops.clear_rar(hw, i); - - adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); } } -#endif +#endif static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { - if (adapter->mac_table[i].state & - IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, - adapter->mac_table[i].addr, - adapter->mac_table[i].queue, - IXGBE_RAH_AV); - else - hw->mac.ops.clear_rar(hw, i); - adapter->mac_table[i].state &= - ~(IXGBE_MAC_STATE_MODIFIED); - } + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) + continue; + + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); } } static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) { - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; } + ixgbe_sync_mac_table(adapter); } -static int ixgbe_available_rars(struct ixgbe_adapter *adapter) +static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i, count = 0; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state == 0) - count++; + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* do not count default RAR as available */ + if 
(mac_table->state & IXGBE_MAC_STATE_DEFAULT) + continue; + + /* only count unused and addresses that belong to us */ + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { + if (mac_table->pool != pool) + continue; + } + + count++; } + return count; } /* this function destroys the first RAR entry */ -static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, - u8 *addr) +static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].queue = VMDQ_P(0); - adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | - IXGBE_MAC_STATE_IN_USE); - hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].queue, + memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + mac_table->pool = VMDQ_P(0); + + mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; + + hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); } -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) continue; - adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | - IXGBE_MAC_STATE_IN_USE); - ether_addr_copy(adapter->mac_table[i].addr, addr); - adapter->mac_table[i].queue = queue; + + ether_addr_copy(mac_table->addr, addr); + mac_table->pool = pool; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE; + ixgbe_sync_mac_table(adapter); + return i; } + return -ENOMEM; } -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) { - /* search table for addr, if found, set to 0 and sync */ - int i; + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; + int i; if (is_zero_ether_addr(addr)) return -EINVAL; - for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (ether_addr_equal(addr, adapter->mac_table[i].addr) && - adapter->mac_table[i].queue == queue) { - adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - eth_zero_addr(adapter->mac_table[i].addr); - adapter->mac_table[i].queue = 0; - ixgbe_sync_mac_table(adapter); - return 0; - } + /* search table for addr, if found clear IN_USE flag and sync */ + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* we can only delete an entry if it is in use */ + if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) + continue; + /* we only care about entries that belong to the given pool */ + if (mac_table->pool != pool) + continue; + /* we only care about a specific MAC address */ + if (!ether_addr_equal(addr, mac_table->addr)) + continue; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + + ixgbe_sync_mac_table(adapter); + + return 0; } + return -ENOMEM; } /** @@ -4169,7 +4376,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) int count = 0; /* return ENOMEM 
indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) return -ENOMEM; if (!netdev_uc_empty(netdev)) { @@ -4183,6 +4390,25 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) return count; } +static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4197,12 +4423,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; - u32 vlnctrl; int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); /* set all bits that we expect to always be set */ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ @@ -4212,25 +4436,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - /* Only disable hardware filter vlans in promiscuous mode - * if SR-IOV and VMDQ are disabled - otherwise ensure - * that hardware VLAN filters remain enabled. 
- */ - if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED)) - vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + ixgbe_vlan_promisc_enable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } - vlnctrl |= IXGBE_VLNCTRL_VFE; hw->addr_ctrl.user_set_promisc = false; + ixgbe_vlan_promisc_disable(adapter); } /* @@ -4238,8 +4455,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); - if (count < 0) { + if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; } @@ -4275,7 +4491,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev) /* NOTE: VLAN filtering is disabled by setting PROMISC */ } - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) @@ -5042,7 +5257,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int err; - u8 old_addr[ETH_ALEN]; if (ixgbe_removed(hw->hw_addr)) return; @@ -5078,10 +5292,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - /* do not flush user set addresses */ - memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + + /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); - ixgbe_mac_set_default_filter(adapter, old_addr); + __dev_uc_unsync(netdev, NULL); + + /* do not flush user set addresses */ + ixgbe_mac_set_default_filter(adapter); /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) @@ -5331,6 +5548,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { @@ -6616,10 +6835,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; + unsigned int vf; u32 gpc; - int pos; - unsigned short vf_id; if (!(netif_carrier_ok(adapter->netdev))) return; @@ -6636,26 +6853,17 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) if (!pdev) return; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return; - - /* get the device ID for the VF */ - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); - /* check status reg for all VFs owned by this PF */ - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { - u16 status_reg; - - pci_read_config_word(vfdev, PCI_STATUS, &status_reg); - if (status_reg & PCI_STATUS_REC_MASTER_ABORT) - /* issue VFLR */ - ixgbe_issue_vf_flr(adapter, vfdev); - } + for (vf = 0; vf < adapter->num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + u16 status_reg; - vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + if (!vfdev) + continue; + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg != IXGBE_FAILED_READ_CFG_WORD && + status_reg & PCI_STATUS_REC_MASTER_ABORT) + ixgbe_issue_vf_flr(adapter, vfdev); } } @@ -7024,6 +7232,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct tcphdr *tcphdr; u8 *raw; } 
transport_hdr; + __be16 frag_off; if (skb->encapsulation) { network_hdr.raw = skb_inner_network_header(skb); @@ -7047,13 +7256,17 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, case 6: vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr.raw - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but version=%d\n", - network_hdr.ipv4->version); - } + break; } switch (l4_hdr) { @@ -7074,16 +7287,18 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, version=%d, l4 proto=%x\n", + network_hdr.ipv4->version, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -7358,7 +7573,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - if (skb->encapsulation) { + if (!skb->encapsulation) { + th = tcp_hdr(skb); + } else { #ifdef CONFIG_IXGBE_VXLAN struct ixgbe_adapter *adapter = q_vector->adapter; @@ -7377,14 +7594,34 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #else return; #endif /* CONFIG_IXGBE_VXLAN */ - } else { - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + } + + /* Currently only IPv4/IPv6 with TCP is supported */ + switch (hdr.ipv4->version) { + case IPVERSION: + if (hdr.ipv4->protocol != IPPROTO_TCP) return; - th = tcp_hdr(skb); + break; + case 6: + if (likely((unsigned char *)th - hdr.network == + sizeof(struct ipv6hdr))) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + } else { + __be16 frag_off; + u8 l4_hdr; + + ipv6_skip_exthdr(skb, hdr.network - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + return; + if (l4_hdr != IPPROTO_TCP) + return; + } + break; + default: + return; } /* skip this packet since it is invalid or the socket is closing */ @@ -7419,10 +7656,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring, common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (first->protocol == htons(ETH_P_IP)) { + switch (hdr.ipv4->version) { + case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - } else { + break; + case 6: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ hdr.ipv6->saddr.s6_addr32[1] ^ @@ -7432,6 +7671,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[1] ^ hdr.ipv6->daddr.s6_addr32[2] ^ hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; } #ifdef CONFIG_IXGBE_VXLAN @@ -7659,17 +7901,16 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; - int ret; if (!is_valid_ether_addr(addr->sa_data)) 
return -EADDRNOTAVAIL; - ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); - return ret > 0 ? 0 : ret; + ixgbe_mac_set_default_filter(adapter); + + return 0; } static int @@ -8155,7 +8396,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 pool = VMDQ_P(0); + + if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) return -ENOMEM; } @@ -8387,7 +8631,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_ALL_CSUM; + return features & ~NETIF_F_CSUM_MASK; return features; } @@ -8784,8 +9028,8 @@ skip_sriov: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->features |= NETIF_F_SCTP_CSUM; - netdev->hw_features |= NETIF_F_SCTP_CSUM | + netdev->features |= NETIF_F_SCTP_CRC; + netdev->hw_features |= NETIF_F_SCTP_CRC | NETIF_F_NTUPLE; break; default: @@ -8801,8 +9045,7 @@ skip_sriov: netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; - netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -8811,9 +9054,7 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->hw_enc_features |= NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM; break; default: break; @@ -8873,7 +9114,7 @@ skip_sriov: goto err_sw_init; } - ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + ixgbe_mac_set_default_filter(adapter); setup_timer(&adapter->service_timer, &ixgbe_service_timer, (unsigned long) adapter); @@ -9328,6 +9569,12 @@ static int __init ixgbe_init_module(void) pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); + ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); + if (!ixgbe_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); + return -ENOMEM; + } + ixgbe_dbg_init(); ret = pci_register_driver(&ixgbe_driver); @@ -9359,6 +9606,10 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); + if (ixgbe_wq) { + destroy_workqueue(ixgbe_wq); + ixgbe_wq = NULL; + } } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fb8673d63806..db0731e05401 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -2393,6 +2393,9 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return 0; + if (!on && ixgbe_mng_present(hw)) + return 0; + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 
e5ba04025e2b..ef1504d41890 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,6 +27,7 @@ *******************************************************************************/ #include "ixgbe.h" #include <linux/ptp_classify.h> +#include <linux/clocksource.h> /* * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -93,7 +94,6 @@ #define IXGBE_INCVAL_SHIFT_82599 7 #define IXGBE_INCPER_SHIFT_82599 24 -#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL #define IXGBE_OVERFLOW_PERIOD (HZ * 30) #define IXGBE_PTP_TX_TIMEOUT (HZ * 15) @@ -104,8 +104,68 @@ */ #define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL +/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL + * which contain measurements of seconds and nanoseconds respectively. This + * matches the standard linux representation of time in the kernel. In addition, + * the X550 also has a SYSTIMER register which represents residue, or + * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA + * register is used, but it is unlike the X540 and 82599 devices. TIMINCA + * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the + * high bit representing whether the adjustent is positive or negative. Every + * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range + * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the + * X550's clock for purposes of SYSTIME generation is constant and not dependent + * on the link speed. + * + * SYSTIMEH SYSTIMEL SYSTIMER + * +--------------+ +--------------+ +-------------+ + * X550 | 32 | | 32 | | 32 | + * *--------------+ +--------------+ +-------------+ + * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ + * + * This results in a full 96 bits to represent the clock, with 32 bits for + * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under + * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for + * underflow of adjustments. + * + * The 32 bits of seconds for the X550 overflows every + * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. + * + * In order to adjust the clock frequency for the X550, the TIMINCA register is + * provided. This register represents a + or minus nearly 0.5 ns adjustment to + * the base frequency. It is measured in 2^-32 ns units, with the high bit being + * the sign bit. This register enables software to calculate frequency + * adjustments and apply them directly to the clock rate. + * + * The math for converting ppb into TIMINCA values is fairly straightforward. + * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL + * + * This assumes that ppb is never high enough to create a value bigger than + * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this + * value is also simple. + * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL + * + * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is + * 12.5 nanoseconds. 
This means that the Max ppb is 39999999 + * Note: We subtract one in order to ensure no overflow, because the TIMINCA + * register can only hold slightly under 0.5 nanoseconds. + * + * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns + * into 2^-32 units, which is + * + * 12.5 * 2^32 = C80000000 + * + * Some revisions of hardware have a faster base frequency than the registers + * were defined for. To fix this, we use a timecounter structure with the + * proper mult and shift to convert the cycles into nanoseconds of time. + */ +#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL +#define INCVALUE_MASK 0x7FFFFFFF +#define ISGN 0x80000000 +#define MAX_TIMADJ 0x7FFFFFFF + /** - * ixgbe_ptp_setup_sdp + * ixgbe_ptp_setup_sdp_x540 * @hw: the hardware private structure * * this function enables or disables the clock out feature on SDP0 for @@ -116,83 +176,116 @@ * aligns the start of the PPS signal to that value. The shift is * necessary because it can change based on the link speed. */ -static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int shift = adapter->cc.shift; + int shift = adapter->hw_cc.shift; u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; u64 ns = 0, clock_edge = 0; - if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && - (hw->mac.type == ixgbe_mac_X540)) { + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); - /* disable the pin first */ - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - IXGBE_WRITE_FLUSH(hw); + if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* - * enable the SDP0 pin as output, and connected to the - * native function for Timesync (ClockOut) - */ - esdp |= (IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE); + /* enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE; - /* - * enable the Clock Out feature on SDP0, and allow - * interrupts to occur when the pin changes - */ - tsauxc = (IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT); + /* enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT; - /* clock period (or pulse length) */ - clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); - clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + /* clock period (or pulse length) */ + clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); + clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - /* - * Account for the cyclecounter wrap-around value by - * using the converted ns value of the current time to - * check for when the next aligned second would occur. - */ - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - ns = timecounter_cyc2time(&adapter->tc, clock_edge); + /* Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. 
+ */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); - div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); - clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); + clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - } else { - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - } + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); IXGBE_WRITE_FLUSH(hw); } /** - * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) + * ixgbe_ptp_read_X550 - read cycle counter value + * @hw_cc: cyclecounter structure + * + * This function reads SYSTIME registers. It is called by the cyclecounter + * structure to convert from internal representation into nanoseconds. We need + * this for X550 since some skews do not have expected clock frequency and + * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of + * "cycles", rather than seconds and nanoseconds. + */ +static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +{ + struct ixgbe_adapter *adapter = + container_of(hw_cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + struct timespec64 ts; + + /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. + * Some revisions of hardware run at a higher frequency and so the + * cycles are not guaranteed to be nanoseconds. The timespec64 created + * here is used for its math/conversions but does not necessarily + * represent nominal time. + * + * It should be noted that this cyclecounter will overflow at a + * non-bitmask field since we have to convert our billions of cycles + * into an actual cycles count. This results in some possible weird + * situations at high cycle counter stamps. However given that 32 bits + * of "seconds" is ~138 years this isn't a problem. Even at the + * increased frequency of some revisions, this is still ~103 years. + * Since the SYSTIME values start at 0 and we never write them, it is + * highly unlikely for the cyclecounter to overflow in practice. 
+ */ + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + + return (u64)timespec64_to_ns(&ts); +} + +/** + * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the * arbitrary fixed point registers */ -static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) +static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = - container_of(cc, struct ixgbe_adapter, cc); + container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; u64 stamp = 0; @@ -203,20 +296,79 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) } /** - * ixgbe_ptp_adjfreq + * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + struct timespec64 systime; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + switch (adapter->hw.mac.type) { + /* X550 and later hardware supposedly represent time using a seconds + * and nanoseconds counter, instead of raw 64bits nanoseconds. We need + * to convert the timestamp into cycles before it can be fed to the + * cyclecounter. We need an actual cyclecounter because some revisions + * of hardware run at a higher frequency and thus the counter does + * not represent seconds/nanoseconds. Instead it can be thought of as + * cycles and billions of cycles. + */ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. + */ + systime.tv_sec = timestamp >> 32; + systime.tv_nsec = timestamp & 0xFFFFFFFF; + + timestamp = timespec64_to_ns(&systime); + break; + default: + break; + } + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ixgbe_ptp_adjfreq_82599 * @ptp: the ptp clock structure * @ppb: parts per billion adjustment from base * * adjust the frequency of the ptp cycle counter by the * indicated ppb from the base frequency. 
*/ -static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; - u64 freq; - u32 diff, incval; + u64 freq, incval; + u32 diff; int neg_adj = 0; if (ppb < 0) { @@ -235,12 +387,16 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) switch (hw->mac.type) { case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + if (incval > 0xFFFFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); break; case ixgbe_mac_82599EB: + if (incval > 0x00FFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + ((u32)incval & 0x00FFFFFFUL)); break; default: break; @@ -250,6 +406,43 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } /** + * ixgbe_ptp_adjfreq_X550 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the SYSTIME registers by the indicated ppb from base + * frequency + */ +static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + int neg_adj = 0; + u64 rate = IXGBE_X550_BASE_PERIOD; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate *= ppb; + rate = div_u64(rate, 1000000000ULL); + + /* warn if rate is too large */ + if (rate >= INCVALUE_MASK) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); + + return 0; +} + +/** * ixgbe_ptp_adjtime * @ptp: the ptp clock structure * @delta: offset to adjust the cycle counter by @@ -263,10 +456,11 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_adjtime(&adapter->tc, delta); + timecounter_adjtime(&adapter->hw_tc, delta); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -283,11 +477,11 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; + u64 ns; spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->tc); + ns = timecounter_read(&adapter->hw_tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@ -308,17 +502,16 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; unsigned long flags; - - ns = timespec64_to_ns(ts); + u64 ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_init(&adapter->tc, &adapter->cc, ns); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); return 0; } @@ -343,33 +536,26 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, * event when the clock SDP 
triggers. Clear mask when PPS is * disabled */ - if (rq->type == PTP_CLK_REQ_PPS) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - if (on) - adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; - else - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - - ixgbe_ptp_setup_sdp(adapter); - return 0; - default: - break; - } - } + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - return -ENOTSUPP; + adapter->ptp_setup_sdp(adapter); + return 0; } /** * ixgbe_ptp_check_pps_event * @adapter: the private adapter structure - * @eicr: the interrupt cause register value * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. */ -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; @@ -425,7 +611,9 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + struct ixgbe_ring *rx_ring; unsigned long rx_event; + int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit @@ -437,19 +625,43 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; - if (time_after(adapter->last_rx_timestamp, rx_event)) - rx_event = adapter->last_rx_timestamp; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5*HZ)) { + if (time_is_before_jiffies(rx_event + 5 * HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; e_warn(drv, "clearing RX Timestamp hang\n"); } } /** + * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
+ */ +static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: the private adapter struct * @@ -461,23 +673,15 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; - u64 regval = 0, ns; - unsigned long flags; + u64 regval = 0; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(ns); + ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + ixgbe_ptp_clear_tx_timestamp(adapter); } /** @@ -497,38 +701,85 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; - if (timeout) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - e_warn(drv, "clearing Tx Timestamp hang\n"); + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ixgbe_ptp_clear_tx_timestamp(adapter); return; } + /* stop polling once we have a valid timestamp */ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { ixgbe_ptp_tx_hwtstamp(adapter); - else + return; + } + + if (timeout) { + ixgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang\n"); + } else { /* reschedule to keep checking if it's not available yet */ schedule_work(&adapter->ptp_tx_work); + } } /** - * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp - * @adapter: pointer to adapter struct + * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer + * @q_vector: structure containing interrupt and ring information + * @skb: the packet + * + * This function will be called by the Rx routine of the timestamp for this + * packet is stored in the buffer. The value is stored in little endian format + * starting at the end of the packet data. + */ +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + __le64 regval; + + /* copy the bits out of the skb, and then trim the skb length */ + skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, + IXGBE_TS_HDR_LEN); + __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); + + /* The timestamp is recorded in little endian format, and is stored at + * the end of the packet. 
+ * + * DWORD: N N + 1 N + 2 + * Field: End of Packet SYSTIMH SYSTIML + */ + ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval)); +} + +/** + * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { - struct ixgbe_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps *shhwtstamps; - u64 regval = 0, ns; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u64 regval = 0; u32 tsyncrxctl; - unsigned long flags; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) @@ -537,17 +788,7 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->tc, regval); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - - /* Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. - */ - adapter->last_rx_timestamp = jiffies; + ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) @@ -610,14 +851,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -631,9 +878,21 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: + /* The X550 controller is capable of timestamping all packets, + * which allows it to accept any filter. 
+ */ + if (hw->mac.type >= ixgbe_mac_X550) { + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + break; + } + /* fall through */ default: /* * register RXMTRL must be set in order to do V1 packets, @@ -641,16 +900,46 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } if (hw->mac.type == ixgbe_mac_82598EB) { + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); if (tsync_rx_ctl | tsync_tx_ctl) return -ERANGE; return 0; } + /* Per-packet timestamping only works if the filter is set to all + * packets. Since this is desired, always timestamp all packets as long + * as any Rx filter was configured. + */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* enable timestamping all packets only if at least some + * packets were requested. Otherwise, play nice and disable + * timestamping + */ + if (config->rx_filter == HWTSTAMP_FILTER_NONE) + break; + + tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | + IXGBE_TSYNCRXCTL_TYPE_ALL | + IXGBE_TSYNCRXCTL_TSIP_UT_EN; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + is_l2 = true; + break; + default: + break; + } + /* define ethertype filter for timestamping L2 packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), @@ -678,8 +967,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); /* clear TX/RX time stamp registers, just to be sure */ - regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); - regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + ixgbe_ptp_clear_tx_timestamp(adapter); + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); return 0; } @@ -712,23 +1001,9 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) -EFAULT : 0; } -/** - * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter: pointer to the adapter structure - * - * This function should be called to set the proper values for the TIMINCA - * register and tell the cyclecounter structure what the tick rate of SYSTIME - * is. It does not directly modify SYSTIME registers or the timecounter - * structure. It should be called whenever a new TIMINCA value is necessary, - * such as during initialization or when the link speed changes. 
- */ -void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, + u32 *shift, u32 *incval) { - struct ixgbe_hw *hw = &adapter->hw; - u32 incval = 0; - u32 shift = 0; - unsigned long flags; - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -745,36 +1020,98 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) */ switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: - incval = IXGBE_INCVAL_100; - shift = IXGBE_INCVAL_SHIFT_100; + *shift = IXGBE_INCVAL_SHIFT_100; + *incval = IXGBE_INCVAL_100; break; case IXGBE_LINK_SPEED_1GB_FULL: - incval = IXGBE_INCVAL_1GB; - shift = IXGBE_INCVAL_SHIFT_1GB; + *shift = IXGBE_INCVAL_SHIFT_1GB; + *incval = IXGBE_INCVAL_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: default: - incval = IXGBE_INCVAL_10GB; - shift = IXGBE_INCVAL_SHIFT_10GB; + *shift = IXGBE_INCVAL_SHIFT_10GB; + *incval = IXGBE_INCVAL_10GB; break; } +} - /** - * Modify the calculated values to fit within the correct - * number of bits specified by the hardware. The 82599 doesn't - * have the same space as the X540, so bitshift the calculated - * values to fit. +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct cyclecounter cc; + unsigned long flags; + u32 incval = 0; + u32 tsauxc = 0; + u32 fuse0 = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is + * designed to represent seconds and nanoseconds when this is + * the case. However, some revisions of hardware have a 400Mhz + * clock and we have to compensate for this frequency + * variation using corrected mult and shift values. 
+ */ + fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); + if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { + cc.mult = 3; + cc.shift = 2; + } + /* fallthrough */ + case ixgbe_mac_X550: + cc.read = ixgbe_ptp_read_X550; + + /* enable SYSTIME counter */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); + + IXGBE_WRITE_FLUSH(hw); + break; case ixgbe_mac_X540: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); break; case ixgbe_mac_82599EB: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); incval >>= IXGBE_INCVAL_SHIFT_82599; - shift -= IXGBE_INCVAL_SHIFT_82599; + cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | - incval); + (1 << IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ @@ -787,13 +1124,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); - - memset(&adapter->cc, 0, sizeof(adapter->cc)); - adapter->cc.read = ixgbe_ptp_read; - adapter->cc.mask = CYCLECOUNTER_MASK(64); - adapter->cc.shift = shift; - adapter->cc.mult = 1; - + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } @@ -814,29 +1145,27 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; - /* set SYSTIME registers to 0 just in case */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); - IXGBE_WRITE_FLUSH(hw); - /* reset the hardware timestamping mode */ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + /* 82598 does not support PTP */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + ixgbe_ptp_start_cyclecounter(adapter); spin_lock_irqsave(&adapter->tmreg_lock, flags); - - /* reset the ns time counter */ - timecounter_init(&adapter->tc, &adapter->cc, + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ktime_to_ns(ktime_get_real())); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - /* - * Now that the shift has been calculated and the systime + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime * registers reset, (re-)enable the Clock out feature */ - ixgbe_ptp_setup_sdp(adapter); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); } /** @@ -845,11 +1174,11 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) * * This function performs setup of the user entry point function table and * initializes the PTP clock device, which is used to access the clock-like - * features of the PTP core. It will be called by ixgbe_ptp_init, only if - * there isn't already a clock device (such as after a suspend/resume cycle, - * where the clock device wasn't destroyed). + * features of the PTP core. It will be called by ixgbe_ptp_init, and may + * reuse a previously initialized clock (such as during a suspend/resume + * cycle). 
*/ -static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) +static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; long err; @@ -869,11 +1198,12 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, @@ -885,14 +1215,31 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 30000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = NULL; break; default: adapter->ptp_clock = NULL; + adapter->ptp_setup_sdp = NULL; return -EOPNOTSUPP; } @@ -961,18 +1308,13 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) return; - /* since this might be called in suspend, we don't clear the state, - * but simply reset the auxiliary PPS signal control register - */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0); + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); /* ensure that we cancel any pending PTP Tx work item in progress */ cancel_work_sync(&adapter->ptp_tx_work); - if (adapter->ptp_tx_skb) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - } + ixgbe_ptp_clear_tx_timestamp(adapter); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index fcd8b27a0ccb..8025a3f93598 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -130,6 +130,38 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) return -ENOMEM; } +/** + * ixgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + pci_dev_get(vfdev); + adapter->vfinfo[vf].vfdev = vfdev; + ++vf; + } +} + /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ @@ -170,8 +202,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) } } - if (!__ixgbe_enable_sriov(adapter)) + if (!__ixgbe_enable_sriov(adapter)) { + ixgbe_get_vfs(adapter); return; + } /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. @@ -184,6 +218,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { + unsigned int num_vfs = adapter->num_vfs, vf; struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; @@ -192,6 +227,16 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + adapter->vfinfo[vf].vfdev = NULL; + pci_dev_put(vfdev); + } + /* free VF control structures */ kfree(adapter->vfinfo); adapter->vfinfo = NULL; @@ -289,6 +334,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) e_dev_warn("Failed to enable PCI sriov: %d\n", err); return err; } + ixgbe_get_vfs(adapter); ixgbe_sriov_reinit(adapter); return num_vfs; @@ -406,11 +452,34 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && !add) - return 0; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); + if (err) + return err; + } + + err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); - return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + + return err; } static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) @@ -516,13 +585,75 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); } + +static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i; + + /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ + for (i = IXGBE_VLVF_ENTRIES; i--;) { + u32 bits[2], vlvfb, vid, vfta, vlvf; + u32 word = i * 2 + vf / 32; + u32 mask = 1 << (vf % 32); + + vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* if our bit isn't set we can skip it */ + if (!(vlvfb & mask)) + continue; + + /* clear our bit from vlvfb */ + vlvfb ^= mask; + + /* create 64b mask to chedk to see if we should clear VLVF */ + bits[word % 2] = vlvfb; + bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); + + /* if promisc is enabled, PF will be present, leave VFTA */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { + bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); + + if (bits[0] || bits[1]) + goto update_vlvfb; + goto update_vlvf; + } + + /* if other pools are present, just remove ourselves */ + if (bits[0] || bits[1]) + goto update_vlvfb; + + /* if we cannot determine VLAN just remove ourselves */ + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + if (!vlvf) + goto update_vlvfb; + + vid = vlvf & VLAN_VID_MASK; + mask = 1 << (vid % 32); + + /* clear bit from VFTA */ + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); + if (vfta & mask) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); +update_vlvf: + /* clear POOL selection enable */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); +update_vlvfb: + /* clear pool bits */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); + } +} + static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; u8 num_tcs = netdev_get_num_tc(adapter->netdev); - /* add PF assigned VLAN or VLAN 0 */ + /* remove VLAN filters beloning to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ @@ -768,40 +899,14 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } -static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) -{ - u32 vlvf; - s32 regindex; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* Search for the vlan id in the VLVF entries */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if ((vlvf & VLAN_VID_MASK) == vlan) - break; - } - - /* Return a negative value if not found */ - if (regindex >= IXGBE_VLVF_ENTRIES) - regindex = -1; - - return regindex; -} - static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { + u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + u8 tcs = netdev_get_num_tc(adapter->netdev); struct ixgbe_hw *hw = &adapter->hw; - int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); int err; - s32 reg_ndx; - u32 vlvf; - u32 bits; - u8 tcs = 
netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -811,54 +916,23 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, return -1; } - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - /* in case of promiscuous mode any VLAN filter set for a VF must - * also have the PF pool added to it. - */ - if (add && adapter->netdev->flags & IFF_PROMISC) - err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (!err && adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + if (err) + return err; - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && adapter->netdev->flags & IFF_PROMISC) { - reg_ndx = ixgbe_find_vlvf_entry(hw, vid); - if (reg_ndx < 0) - return err; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - if (VMDQ_P(0) < 32) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - bits &= ~(1 << VMDQ_P(0)); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - bits &= ~(1 << (VMDQ_P(0) - 32)); - bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - } + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. - */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && !bits) - ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); - } + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; - return err; + return 0; } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -1239,6 +1313,9 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, if (err) goto out; + /* Revoke tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, false, 0, vf); + ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) @@ -1272,6 +1349,8 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + /* Restore tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 995f03107eac..bf7367a08716 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1020,6 +1020,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ #define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ #define 
IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ @@ -1036,6 +1037,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ /* Diagnostic Registers */ #define IXGBE_RDSTATCTL 0x02C20 @@ -1345,7 +1347,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ /* autoneg vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 @@ -1353,6 +1358,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ #define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ @@ -2209,6 +2215,7 @@ enum { #define IXGBE_TSAUXC_EN_CLK 0x00000004 #define IXGBE_TSAUXC_SYNCLK 0x00000008 #define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2218,8 +2225,12 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 #define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ + +#define IXGBE_TSIM_TXTS 0x00000002 #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2332,6 +2343,7 @@ enum { #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ @@ -2768,6 +2780,7 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ #define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define 
IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ @@ -3288,7 +3301,7 @@ struct ixgbe_mac_operations { s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); @@ -3508,7 +3521,7 @@ struct ixgbe_info { #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV1 BIT(6) +#define IXGBE_FUSES0_REV_MASK (3 << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index c1d4584f6469..2358c1b7d586 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -57,8 +57,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) struct ixgbe_phy_info *phy = &hw->phy; /* set_phy_power was set by default to NULL */ - if (!ixgbe_mng_present(hw)) - phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; @@ -110,13 +109,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { @@ -154,12 +154,16 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ebe0ac950b14..87aca3f7c3de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -26,6 +26,8 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); + static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -85,79 +87,6 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) } /** - * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register - * @hw: pointer to hardware structure - * @reg: the register to check - * - * Performs a diagnostic on a register in the CS4227 chip. Returns an error - * if it is not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. 
- */ -static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg) -{ - s32 status; - u32 retry; - u16 reg_val; - - reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1; - status = ixgbe_write_cs4227(hw, reg, reg_val); - if (status) - return status; - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - msleep(IXGBE_CS4227_CHECK_DELAY); - reg_val = 0xFFFF; - ixgbe_read_cs4227(hw, reg, ®_val); - if (!reg_val) - break; - } - if (reg_val) { - hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg); - return status; - } - - return 0; -} - -/** - * ixgbe_get_cs4227_status - Return CS4227 status - * @hw: pointer to hardware structure - * - * Performs a diagnostic on the CS4227 chip. Returns an error if it is - * not operating correctly. - * This function assumes that the caller has acquired the proper semaphore. - */ -static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) -{ - s32 status; - u16 value = 0; - - /* Exit if the diagnostic has already been performed. */ - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - if (status) - return status; - if (value == IXGBE_CS4227_RESET_COMPLETE) - return 0; - - /* Check port 0. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB); - if (status) - return status; - - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB); - if (status) - return status; - - /* Check port 1. */ - status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB + - (1 << 12)); - if (status) - return status; - - return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB + - (1 << 12)); -} - -/** * ixgbe_read_pe - Read register from port expander * @hw: pointer to hardware structure * @reg: register number to read @@ -326,13 +255,6 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) return; } - /* Is the CS4227 working correctly? */ - status = ixgbe_get_cs4227_status(hw); - if (status) { - hw_err(hw, "CS4227 status failed: %d", status); - goto out; - } - /* Record completion for next time. */ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_COMPLETE); @@ -1257,31 +1179,71 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, if (status) return status; - /* Configure CS4227 LINE side to 10G SR. */ - slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); - value = IXGBE_CS4227_SPEED_10G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); - - /* Configure CS4227 for HOST connection rate then type. */ - slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); - value = speed & IXGBE_LINK_SPEED_10GB_FULL ? - IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* Configure CS4227 LINE side to 10G SR. */ + slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + value = IXGBE_CS4227_SPEED_10G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); - if (setup_linear) - value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; - status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice, - value); + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Configure CS4227 for HOST connection rate then type. 
*/ + slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + value = speed & IXGBE_LINK_SPEED_10GB_FULL ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; - /* If internal link mode is XFI, then setup XFI internal link. */ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + + /* Setup XFI internal link. */ status = ixgbe_setup_ixfi_x550em(hw, &speed); + if (status) { + hw_dbg(hw, "setup_ixfi failed with %d\n", status); + return status; + } + } else { + /* Configure internal PHY for KR/KX. */ + status = ixgbe_setup_kr_speed_x550em(hw, speed); + if (status) { + hw_dbg(hw, "setup_kr_speed failed with %d\n", status); + return status; + } + /* Configure CS4227 LINE side to proper mode. */ + slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, + slice, value); + if (status) + goto i2c_err; + } + + return 0; + +i2c_err: + hw_dbg(hw, "combined i2c access failed with %d\n", status); return status; } @@ -1482,7 +1444,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; - /* High temperature failure alarm triggered */ + /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); @@ -1496,6 +1458,21 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + } /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, @@ -1549,14 +1526,15 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) if (status) return status; - /* Enables high temperature failure alarm */ + /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); if (status) return status; - reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN; + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, @@ -1765,6 +1743,12 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; + if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } + 
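The hunks above make the X550EM_x SFP link setup branch on IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE: without that bit the driver programs the CS4227 LINE and HOST sides and then brings up the iXFI internal link, with it the internal PHY is trained for KR first and only the CS4227 LINE side gets an EDC mode. A minimal, self-contained C sketch of that decision flow follows; the numeric constants and helper names are illustrative placeholders, not the driver's real definitions, and only the (mode << 1) | 1 value encoding is taken from the hunk itself.

/* Illustrative sketch only -- not the in-tree implementation. The numeric
 * values below are placeholders; the real ones live in ixgbe_type.h/ixgbe_phy.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NW_MNG_IF_SEL_INT_PHY_MODE  (1u << 24)   /* placeholder bit position */
#define EDC_MODE_CX1                0x0002u      /* placeholder value */
#define EDC_MODE_SR                 0x0004u      /* placeholder value */

/* Mirrors the (mode << 1) | 1 value the driver writes to the CS4227
 * SPARE24_LSB registers; the low bit presumably latches the new mode.
 */
static uint16_t cs4227_edc_value(bool setup_linear)
{
	uint16_t mode = setup_linear ? EDC_MODE_CX1 : EDC_MODE_SR;

	return (uint16_t)((mode << 1) | 1);
}

static void setup_sfp_link(uint32_t nw_mng_if_sel, bool setup_linear)
{
	if (!(nw_mng_if_sel & NW_MNG_IF_SEL_INT_PHY_MODE)) {
		/* External path: program CS4227 LINE and HOST sides,
		 * then bring up the iXFI internal link.
		 */
		printf("CS4227 line+host, EDC=0x%04x, then iXFI\n",
		       (unsigned int)cs4227_edc_value(setup_linear));
	} else {
		/* Internal PHY in KR mode: train KR first, then only the
		 * CS4227 LINE side needs the EDC mode.
		 */
		printf("KR speed setup, then CS4227 line EDC=0x%04x\n",
		       (unsigned int)cs4227_edc_value(setup_linear));
	}
}

int main(void)
{
	setup_sfp_link(0, false);                          /* iXFI path, SR module */
	setup_sfp_link(NW_MNG_IF_SEL_INT_PHY_MODE, true);  /* KR path, DA cable */
	return 0;
}
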
/* If link is not up, then there is no setup necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) @@ -1873,10 +1857,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) u32 save_autoneg; bool link_up; - /* SW LPLU not required on later HW revisions. */ - if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))) - return 0; - /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) return 0; @@ -1969,7 +1949,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; - ixgbe_link_speed speed; s32 ret_val; hw->mac.ops.set_lan_id(hw); @@ -1982,13 +1961,6 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) * to determine internal PHY mode. */ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If internal PHY mode is KR, then initialize KR link */ - if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } } /* Identify the PHY or SFP module */ @@ -2020,18 +1992,13 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now. */ - if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - phy->ops.setup_internal_link = - ixgbe_setup_internal_phy_t_x550em; - } else { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); - } + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; /* setup SW LPLU only for first revision */ - if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, - IXGBE_FUSES0_GROUP(0)))) + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & + IXGBE_FUSES0_REV_MASK)) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; @@ -2176,13 +2143,14 @@ mac_reset_top: ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { - udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; + udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d3e5f5b37999..c48aef613b0a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -774,7 +774,7 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_10K_ITR; + tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ec3147279621..68ec7daa04fd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -326,8 +326,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 -#define IXGBE_10K_ITR 400 -#define IXGBE_8K_ITR 500 +#define IXGBE_12K_ITR 336 /* Helper macros to switch between ints/sec and what the register uses. 
* And yes, it's the same math going both ways. The lowest value diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 592ff237d692..3558f019b631 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -59,7 +59,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "2.12.1-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2012 Intel Corporation."; + "Copyright (c) 2009 - 2015 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -96,12 +96,14 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +static struct workqueue_struct *ixgbevf_wq; + static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); + queue_work(ixgbevf_wq, &adapter->service_task); } static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) @@ -1014,6 +1016,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + if (budget <= 0) + return budget; #ifdef CONFIG_NET_RX_BUSY_POLL if (!ixgbevf_qv_lock_napi(q_vector)) return budget; @@ -1043,7 +1047,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting & 1) + if (adapter->rx_itr_setting == 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state)) @@ -1138,7 +1142,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) if (q_vector->tx.ring && !q_vector->rx.ring) { /* Tx only vector */ if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; + q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { @@ -1196,7 +1200,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) + * 100-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? 
*/ timepassed_us = q_vector->itr >> 2; @@ -1246,8 +1250,9 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) new_itr = IXGBE_20K_ITR; break; case bulk_latency: + new_itr = IXGBE_12K_ITR; + break; default: - new_itr = IXGBE_8K_ITR; break; } @@ -1288,7 +1293,7 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -1332,7 +1337,6 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int i, j; int rqpv, tqpv; - int err = 0; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -1345,7 +1349,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) for (; txr_idx < txr_remaining; v_start++, txr_idx++) map_vector_to_txq(adapter, v_start, txr_idx); - goto out; + return 0; } /* If we don't have enough vectors for a 1-to-1 @@ -1370,8 +1374,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) } } -out: - return err; + return 0; } /** @@ -1469,9 +1472,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) **/ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) { - int err = 0; - - err = ixgbevf_request_msix_irqs(adapter); + int err = ixgbevf_request_msix_irqs(adapter); if (err) hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); @@ -1830,7 +1831,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int err = -EOPNOTSUPP; + int err; spin_lock_bh(&adapter->mbx_lock); @@ -2046,7 +2047,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; - int err = 0, idx = 0; + int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2260,10 +2261,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) } if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, - netdev->addr_len); + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } adapter->last_reset = jiffies; @@ -2421,7 +2420,7 @@ err_allocation: static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int err = 0; + int err; int vector, v_budget; /* It's easy to be greedy for MSI-X vectors, but it really @@ -2439,26 +2438,21 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - err = -ENOMEM; - goto out; - } + if (!adapter->msix_entries) + return -ENOMEM; for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; err = ixgbevf_acquire_msix_vectors(adapter, v_budget); if (err) - goto out; + return err; err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) - goto out; - - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + return err; -out: - return err; + return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); } /** @@ -2483,9 +2477,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->v_idx = 
q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); -#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_add(&q_vector->napi); -#endif adapter->q_vector[q_idx] = q_vector; } @@ -2662,13 +2653,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address\n"); eth_hw_addr_random(netdev); - memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); + ether_addr_copy(hw->mac.addr, netdev->dev_addr); + ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); } /* Enable dynamic interrupt throttling rates */ @@ -3355,6 +3347,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + __be16 frag_off; switch (first->protocol) { case htons(ETH_P_IP): @@ -3365,13 +3358,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; + if (likely(skb_network_header_len(skb) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } break; } @@ -3393,16 +3389,18 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, l3 proto=%x, l4 proto=%x\n", + first->protocol, l4_hdr); } - break; + skb_checksum_help(skb); + goto no_csum; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } +no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; @@ -3698,8 +3696,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + ether_addr_copy(hw->mac.addr, addr->sa_data); spin_lock_bh(&adapter->mbx_lock); @@ -4248,15 +4246,17 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { - int ret; - pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); pr_info("%s\n", ixgbevf_copyright); + ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); + if (!ixgbevf_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); + return -ENOMEM; + } - ret = pci_register_driver(&ixgbevf_driver); - return ret; + return pci_register_driver(&ixgbevf_driver); } module_init(ixgbevf_init_module); @@ -4270,6 +4270,10 @@ module_init(ixgbevf_init_module); static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); + if (ixgbevf_wq) { + destroy_workqueue(ixgbevf_wq); + ixgbevf_wq = NULL; + } } #ifdef DEBUG diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 427f3605cbfc..61a98f4c5746 100644 
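The ixgbevf changes above stop using schedule_work() for the service task and instead queue it on a driver-private single-threaded workqueue created in ixgbevf_init_module() and destroyed in ixgbevf_exit_module(). A minimal, self-contained sketch of that pattern is shown below; the example_* names are hypothetical and only the workqueue API calls mirror what the driver does.

/* Sketch of a driver-private workqueue for deferred service work. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_task;

static void example_task_fn(struct work_struct *work)
{
	/* periodic service work would run here */
}

static int __init example_init(void)
{
	/* one ordered, driver-owned queue instead of the system workqueue */
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_task, example_task_fn);
	queue_work(example_wq, &example_task);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_task);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
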
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -117,7 +117,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - ether_addr_copy(hw->mac.perm_addr, addr); + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + ether_addr_copy(hw->mac.perm_addr, addr); + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 060dd3922974..b1de7afd4116 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2753,7 +2753,7 @@ static netdev_features_t jme_fix_features(struct net_device *netdev, netdev_features_t features) { if (netdev->mtu > 1900) - features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK); return features; } diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 581928c068f2..b630ef1e9646 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -375,22 +375,16 @@ static int ltq_etop_mdio_probe(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (priv->mii_bus->phy_map[phy_addr]) { - phydev = priv->mii_bus->phy_map[phy_addr]; - break; - } - } + phydev = phy_find_first(priv->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), <q_etop_mdio_link, priv->pldata->mii_mode); if (IS_ERR(phydev)) { @@ -408,9 +402,7 @@ ltq_etop_mdio_probe(struct net_device *dev) phydev->advertising = phydev->supported; priv->phydev = phydev; - pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, - dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); return 0; } @@ -435,18 +427,9 @@ ltq_etop_mdio_init(struct net_device *dev) priv->mii_bus->name = "ltq_mii"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", priv->pdev->name, priv->pdev->id); - priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; ++i) - priv->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(priv->mii_bus)) { err = -ENXIO; - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; } if (ltq_etop_mdio_probe(dev)) { @@ -457,8 +440,6 @@ ltq_etop_mdio_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(priv->mii_bus); -err_out_free_mdio_irq: - kfree(priv->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(priv->mii_bus); err_out: @@ -472,7 +453,6 @@ ltq_etop_mdio_cleanup(struct net_device *dev) phy_disconnect(priv->phydev); mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4182290fdbcf..a0c03834a2f7 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -3133,7 +3133,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!mp->phy) err = -ENODEV; else - phy_addr_set(mp, mp->phy->addr); + phy_addr_set(mp, mp->phy->mdio.addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = 
phy_scan(mp, pd->phy_addr); @@ -3257,25 +3257,20 @@ static struct platform_driver mv643xx_eth_driver = { }, }; +static struct platform_driver * const drivers[] = { + &mv643xx_eth_shared_driver, + &mv643xx_eth_driver, +}; + static int __init mv643xx_eth_init_module(void) { - int rc; - - rc = platform_driver_register(&mv643xx_eth_shared_driver); - if (!rc) { - rc = platform_driver_register(&mv643xx_eth_driver); - if (rc) - platform_driver_unregister(&mv643xx_eth_shared_driver); - } - - return rc; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(mv643xx_eth_init_module); static void __exit mv643xx_eth_cleanup_module(void) { - platform_driver_unregister(&mv643xx_eth_driver); - platform_driver_unregister(&mv643xx_eth_shared_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(mv643xx_eth_cleanup_module); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index fc2fb25343f4..8982c882af1b 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev) struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; - int i, ret; + int ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -207,14 +207,6 @@ static int orion_mdio_probe(struct platform_device *pdev) dev_name(&pdev->dev)); bus->parent = &pdev->dev; - bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!bus->irq) - return -ENOMEM; - - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = PHY_POLL; - dev = bus->priv; dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!dev->regs) { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ed622fa29dfa..fabc8df40392 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -110,9 +110,17 @@ #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 +#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) +#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) -/* Exception Interrupt Port/Queue Cause register */ +/* Exception Interrupt Port/Queue Cause register + * + * Their behavior depend of the mapping done using the PCPX2Q + * registers. 
For a given CPU if the bit associated to a queue is not + * set, then for the register a read from this CPU will always return + * 0 and a write won't do anything + */ #define MVNETA_INTR_NEW_CAUSE 0x25a0 #define MVNETA_INTR_NEW_MASK 0x25a4 @@ -254,6 +262,11 @@ #define MVNETA_TX_MTU_MAX 0x3ffff +/* The RSS lookup table actually has 256 entries but we do not use + * them yet + */ +#define MVNETA_RSS_LU_TABLE_SIZE 1 + /* TSO header size */ #define TSO_HEADER_SIZE 128 @@ -356,6 +369,7 @@ struct mvneta_port { struct mvneta_tx_queue *txqs; struct net_device *dev; struct notifier_block cpu_notifier; + int rxq_def; /* Core clock */ struct clk *clk; @@ -371,9 +385,11 @@ struct mvneta_port { unsigned int duplex; unsigned int speed; unsigned int tx_csum_limit; - int use_inband_status:1; + unsigned int use_inband_status:1; u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; + + u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -499,6 +515,9 @@ struct mvneta_tx_queue { /* DMA address of TSO headers */ dma_addr_t tso_hdrs_phys; + + /* Affinity mask for CPUs*/ + cpumask_t affinity_mask; }; struct mvneta_rx_queue { @@ -819,7 +838,13 @@ static void mvneta_port_up(struct mvneta_port *pp) mvreg_write(pp, MVNETA_TXQ_CMD, q_map); /* Enable all initialized RXQs. */ - mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + if (rxq->descs != NULL) + q_map |= (1 << queue); + } + mvreg_write(pp, MVNETA_RXQ_CMD, q_map); } /* Stop the Ethernet port activity */ @@ -973,6 +998,44 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); } +static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) +{ + u32 val; + + if (enable) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_AN_FLOW_CTRL_EN); + val |= MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + val |= MVNETA_GMAC2_INBAND_AN_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } else { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } +} + /* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. @@ -987,6 +1050,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) int cpu; int queue; u32 val; + int max_cpu = num_present_cpus(); /* Clear all Cause registers */ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); @@ -1002,13 +1066,33 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Enable MBUS Retry bit16 */ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); - /* Set CPU queue access map - all CPUs have access to all RX - * queues and to all TX queues + /* Set CPU queue access map. 
CPUs are assigned to the RX and + * TX queues modulo their number. If there is only one TX + * queue then it is assigned to the CPU associated to the + * default RX queue. */ - for_each_present_cpu(cpu) - mvreg_write(pp, MVNETA_CPU_MAP(cpu), - (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | - MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); + for_each_present_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq, txq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + + for (txq = 0; txq < txq_number; txq++) + if ((txq % max_cpu) == cpu) + txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); + + /* With only one TX queue we configure a special case + * which will allow to get all the irq on a single + * CPU + */ + if (txq_number == 1) + txq_map = (cpu == pp->rxq_def) ? + MVNETA_CPU_TXQ_ACCESS(1) : 0; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + } /* Reset RX and TX DMAs */ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); @@ -1029,7 +1113,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_ACC_MODE, val); /* Update val of portCfg register accordingly with all RxQueue types */ - val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); val = 0; @@ -1058,26 +1142,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); - if (pp->use_inband_status) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_AN_FLOW_CTRL_EN); - val |= MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - } else { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN); - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - } - + mvneta_set_autoneg(pp, pp->use_inband_status); mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2082,19 +2147,19 @@ static void mvneta_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_PROMISC) { /* Accept all: Multicast + Unicast */ mvneta_rx_unicast_promisc_set(pp, 1); - mvneta_set_ucast_table(pp, rxq_def); - mvneta_set_special_mcast_table(pp, rxq_def); - mvneta_set_other_mcast_table(pp, rxq_def); + mvneta_set_ucast_table(pp, pp->rxq_def); + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept single Unicast */ mvneta_rx_unicast_promisc_set(pp, 0); mvneta_set_ucast_table(pp, -1); - mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); + mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); if (dev->flags & IFF_ALLMULTI) { /* Accept all multicast */ - mvneta_set_special_mcast_table(pp, rxq_def); - mvneta_set_other_mcast_table(pp, rxq_def); + mvneta_set_special_mcast_table(pp, pp->rxq_def); + mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept only initialized multicast */ mvneta_set_special_mcast_table(pp, -1); @@ -2103,7 +2168,7 @@ static void mvneta_set_rx_mode(struct net_device *dev) if (!netdev_mc_empty(dev)) { netdev_for_each_mc_addr(ha, dev) { mvneta_mcast_addr_set(pp, ha->addr, - rxq_def); + 
pp->rxq_def); } } } @@ -2154,6 +2219,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) { int rx_done = 0; u32 cause_rx_tx; + int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); @@ -2185,8 +2251,15 @@ static int mvneta_poll(struct napi_struct *napi, int budget) /* For the case where the last mvneta_poll did not process all * RX packets */ + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); + cause_rx_tx |= port->cause_rx_tx; - rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); + + if (rx_queue) { + rx_queue = rx_queue - 1; + rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); + } + budget -= rx_done; if (budget > 0) { @@ -2303,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, static int mvneta_txq_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + int cpu; + txq->size = pp->tx_ring_size; /* A queue must always have room for at least one skb. @@ -2355,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp, } mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + /* Setup XPS mapping */ + if (txq_number > 1) + cpu = txq->id % num_present_cpus(); + else + cpu = pp->rxq_def % num_present_cpus(); + cpumask_set_cpu(cpu, &txq->affinity_mask); + netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); + return 0; } @@ -2399,19 +2482,27 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp) /* Cleanup all Rx queues */ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { - mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]); + int queue; + + for (queue = 0; queue < txq_number; queue++) + mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } /* Init all Rx queues */ static int mvneta_setup_rxqs(struct mvneta_port *pp) { - int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]); - if (err) { - netdev_err(pp->dev, "%s: can't create rxq=%d\n", - __func__, rxq_def); - mvneta_cleanup_rxqs(pp); - return err; + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); + + if (err) { + netdev_err(pp->dev, "%s: can't create rxq=%d\n", + __func__, queue); + mvneta_cleanup_rxqs(pp); + return err; + } } return 0; @@ -2435,6 +2526,31 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } +static void mvneta_percpu_unmask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are unmasked, but actually only the ones + * maped to this CPU will be unmasked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK_ALL | + MVNETA_TX_INTR_MASK_ALL | + MVNETA_MISCINTR_INTR_MASK); +} + +static void mvneta_percpu_mask_interrupt(void *arg) +{ + struct mvneta_port *pp = arg; + + /* All the queue are masked, but actually only the ones + * maped to this CPU will be masked + */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +} + static void mvneta_start_dev(struct mvneta_port *pp) { unsigned int cpu; @@ -2452,11 +2568,10 @@ static void mvneta_start_dev(struct mvneta_port *pp) napi_enable(&port->napi); } - /* Unmask interrupts */ - mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number) | - MVNETA_TX_INTR_MASK(txq_number) | - MVNETA_MISCINTR_INTR_MASK); + /* Unmask interrupts. 
It has to be done from each CPU */ + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, + pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | @@ -2615,7 +2730,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) mvneta_mac_addr_set(pp, dev->dev_addr, -1); /* Set new addr in hw */ - mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); + mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); eth_commit_mac_addr_change(dev, addr); return 0; @@ -2732,22 +2847,45 @@ static void mvneta_percpu_disable(void *arg) static void mvneta_percpu_elect(struct mvneta_port *pp) { - int online_cpu_idx, cpu, i = 0; + int online_cpu_idx, max_cpu, cpu, i = 0; - online_cpu_idx = rxq_def % num_online_cpus(); + online_cpu_idx = pp->rxq_def % num_online_cpus(); + max_cpu = num_present_cpus(); for_each_online_cpu(cpu) { + int rxq_map = 0, txq_map = 0; + int rxq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + if (i == online_cpu_idx) - /* Enable per-CPU interrupt on the one CPU we - * just elected + /* Map the default receive queue queue to the + * elected CPU */ - smp_call_function_single(cpu, mvneta_percpu_enable, - pp, true); + rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); + + /* We update the TX queue map only if we have one + * queue. In this case we associate the TX queue to + * the CPU bound to the default RX queue + */ + if (txq_number == 1) + txq_map = (i == online_cpu_idx) ? + MVNETA_CPU_TXQ_ACCESS(1) : 0; else - /* Disable per-CPU interrupt on all the other CPU */ - smp_call_function_single(cpu, mvneta_percpu_disable, - pp, true); + txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & + MVNETA_CPU_TXQ_ACCESS_ALL_MASK; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + + /* Update the interrupt mask on each CPU according the + * new mapping + */ + smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, + pp, true); i++; + } }; @@ -2782,12 +2920,22 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); napi_enable(&port->napi); + + /* Enable per-CPU interrupts on the CPU that is + * brought up. + */ + smp_call_function_single(cpu, mvneta_percpu_enable, + pp, true); + /* Enable per-CPU interrupt on the one CPU we care * about. */ mvneta_percpu_elect(pp); - /* Unmask all ethernet port interrupts */ + /* Unmask all ethernet port interrupts, as this + * notifier is called for each CPU then the CPU to + * Queue mapping is applied + */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number) | @@ -2838,7 +2986,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, static int mvneta_open(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int ret; + int ret, cpu; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + @@ -2868,8 +3016,13 @@ static int mvneta_open(struct net_device *dev) */ mvneta_percpu_disable(pp); - /* Elect a CPU to handle our RX queue interrupt */ - mvneta_percpu_elect(pp); + /* Enable per-CPU interrupt on all the CPU to handle our RX + * queue interrupts + */ + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_enable, + pp, true); + /* Register a CPU notifier to handle the case where our CPU * might be taken offline. 
@@ -2943,10 +3096,43 @@ int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mvneta_port *pp = netdev_priv(dev); + struct phy_device *phydev = pp->phy_dev; - if (!pp->phy_dev) + if (!phydev) return -ENODEV; + if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { + u32 val; + + mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); + + if (cmd->autoneg == AUTONEG_DISABLE) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX); + + if (phydev->duplex) + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (phydev->speed == SPEED_1000) + val |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (phydev->speed == SPEED_100) + val |= MVNETA_GMAC_CONFIG_MII_SPEED; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); + netdev_info(pp->dev, "autoneg status set to %i\n", + pp->use_inband_status); + + if (netif_running(dev)) { + mvneta_port_down(pp); + mvneta_port_up(pp); + } + } + return phy_ethtool_sset(pp->phy_dev, cmd); } @@ -3098,6 +3284,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } +static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return MVNETA_RSS_LU_TABLE_SIZE; +} + +static int mvneta_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = rxq_number; + return 0; + case ETHTOOL_GRXFH: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int mvneta_config_rss(struct mvneta_port *pp) +{ + int cpu; + u32 val; + + netif_tx_stop_all_queues(pp->dev); + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, + pp, true); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_synchronize(&pcpu_port->napi); + napi_disable(&pcpu_port->napi); + } + + pp->rxq_def = pp->indir[0]; + + /* Update unicast mapping */ + mvneta_set_rx_mode(pp->dev); + + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + /* Update the elected CPU matching the new rxq_def */ + mvneta_percpu_elect(pp); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); + + napi_enable(&pcpu_port->napi); + } + + netif_tx_start_all_queues(pp->dev); + + return 0; +} + +static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + /* We require at least one supported parameter to be changed + * and no change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); + + return mvneta_config_rss(pp); +} + +static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + memcpy(indir, 
pp->indir, MVNETA_RSS_LU_TABLE_SIZE); + + return 0; +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3122,6 +3408,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .get_strings = mvneta_ethtool_get_strings, .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, + .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, + .get_rxnfc = mvneta_ethtool_get_rxnfc, + .get_rxfh = mvneta_ethtool_get_rxfh, + .set_rxfh = mvneta_ethtool_set_rxfh, }; /* Initialize hw */ @@ -3230,9 +3520,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) return -EINVAL; } - if (pp->use_inband_status) - ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; - /* Cancel Port Reset */ ctrl &= ~MVNETA_GMAC2_PORT_RESET; mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); @@ -3314,6 +3601,10 @@ static int mvneta_probe(struct platform_device *pdev) strcmp(managed, "in-band-status") == 0); pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; + pp->rxq_def = rxq_def; + + pp->indir[0] = rxq_def; + pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); @@ -3423,7 +3714,7 @@ static int mvneta_probe(struct platform_device *pdev) mvneta_fixed_link_update(pp, phy); - put_device(&phy->dev); + put_device(&phy->mdio.dev); } return 0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5606a043063e..ec0a22119e09 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4380,7 +4380,7 @@ static netdev_features_t sky2_fix_features(struct net_device *dev, */ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { netdev_info(dev, "checksum offload not possible with jumbo frames\n"); - features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM); + features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK); } /* Some hardware requires receive checksum for RSS to work. */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index eb8a4988de63..af975a2b74c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -155,13 +155,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->mcq.comp = cq->is_tx ? 
mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (cq->is_tx) { - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, - NAPI_POLL_WEIGHT); - } else { + if (cq->is_tx) + netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, + NAPI_POLL_WEIGHT); + else netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); - napi_hash_add(&cq->napi); - } napi_enable(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ddb5541882f5..dd84cabb2a51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -337,11 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return bitmap_iterator_count(&it) + (priv->tx_ring_num * 2) + -#ifdef CONFIG_NET_RX_BUSY_POLL - (priv->rx_ring_num * 5); -#else (priv->rx_ring_num * 2); -#endif case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -408,11 +404,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i]->packets; data[index++] = priv->rx_ring[i]->bytes; -#ifdef CONFIG_NET_RX_BUSY_POLL - data[index++] = priv->rx_ring[i]->yields; - data[index++] = priv->rx_ring[i]->misses; - data[index++] = priv->rx_ring[i]->cleaned; -#endif } spin_unlock_bh(&priv->stats_lock); @@ -486,14 +477,6 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); -#ifdef CONFIG_NET_RX_BUSY_POLL - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_napi_yield", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_misses", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_cleaned", i); -#endif } break; case ETH_SS_PRIV_FLAGS: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7869f97de5da..0c7e3f69a73b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -69,34 +69,6 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int mlx4_en_low_latency_recv(struct napi_struct *napi) -{ - struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); - struct net_device *dev = cq->dev; - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; - int done; - - if (!priv->port_up) - return LL_FLUSH_FAILED; - - if (!mlx4_en_cq_lock_poll(cq)) - return LL_FLUSH_BUSY; - - done = mlx4_en_process_rx_cq(dev, cq, 4); - if (likely(done)) - rx_ring->cleaned += done; - else - rx_ring->misses++; - - mlx4_en_cq_unlock_poll(cq); - - return done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -1561,8 +1533,6 @@ int mlx4_en_start_port(struct net_device *dev) for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; - mlx4_en_cq_init_lock(cq); - err = mlx4_en_init_affinity_hint(priv, i); if (err) { en_err(priv, "Failed preparing IRQ affinity hint\n"); @@ -1859,13 +1829,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) for (i = 0; i < priv->rx_ring_num; i++) { struct mlx4_en_cq *cq = priv->rx_cq[i]; - local_bh_disable(); - while (!mlx4_en_cq_lock_napi(cq)) { - pr_info("CQ %d locked\n", i); - mdelay(1); - } - local_bh_enable(); - 
napi_synchronize(&cq->napi); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); @@ -2507,9 +2470,6 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = mlx4_en_low_latency_recv, -#endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e7a5000aa12c..41440b2b20a3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -873,10 +873,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * - TCP/IP (v4) * - without IP options * - not an IP fragment - * - no LLS polling in progress */ - if (!mlx4_en_cq_busy_polling(cq) && - (dev->features & NETIF_F_GRO)) { + if (dev->features & NETIF_F_GRO) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) goto next; @@ -927,7 +925,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); - skb_mark_napi_id(gro_skb, &cq->napi); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -990,13 +987,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud timestamp); } - skb_mark_napi_id(skb, &cq->napi); - - if (!mlx4_en_cq_busy_polling(cq)) - napi_gro_receive(&cq->napi, skb); - else - netif_receive_skb(skb); - + napi_gro_receive(&cq->napi, skb); next: for (nr = 0; nr < priv->num_frags; nr++) mlx4_en_free_frag(priv, frags, nr); @@ -1038,13 +1029,8 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = netdev_priv(dev); int done; - if (!mlx4_en_cq_lock_napi(cq)) - return budget; - done = mlx4_en_process_rx_cq(dev, cq, budget); - mlx4_en_cq_unlock_napi(cq); - /* If we used up all the quota - we're probably not done yet... */ if (done == budget) { const struct cpumask *aff; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 603d1c3d3b2e..4696053165f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -151,6 +151,17 @@ void mlx4_gen_slave_eqe(struct work_struct *work) eqe = next_slave_event_eqe(slave_eq)) { slave = eqe->slave_id; + if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && + eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN && + mlx4_is_bonded(dev)) { + struct mlx4_port_cap port_cap; + + if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state) + goto consume; + + if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state) + goto consume; + } /* All active slaves need to receive the event */ if (slave == ALL_SLAVES) { for (i = 0; i <= dev->persist->num_vfs; i++) { @@ -174,6 +185,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work) mlx4_warn(dev, "Failed to generate event for slave %d\n", slave); } +consume: ++slave_eq->cons; } } @@ -594,7 +606,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; for (i = 0; i < dev->persist->num_vfs + 1; i++) { - if (!test_bit(i, slaves_port.slaves)) + int reported_port = mlx4_is_bonded(dev) ? 
1 : mlx4_phys_to_slave_port(dev, i, port); + + if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) continue; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (i == mlx4_master_func_num(dev)) @@ -606,7 +620,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.port_change.port = cpu_to_be32( (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) - | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + | (reported_port << 28)); mlx4_slave_event(dev, i, eqe); } } else { /* IB port */ @@ -636,7 +650,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) for (i = 0; i < dev->persist->num_vfs + 1; i++) { - if (!test_bit(i, slaves_port.slaves)) + int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); + + if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) continue; if (i == mlx4_master_func_num(dev)) continue; @@ -645,7 +661,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eqe->event.port_change.port = cpu_to_be32( (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) - | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + | (reported_port << 28)); mlx4_slave_event(dev, i, eqe); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 90db94e83fde..2c2baab9d880 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1104,6 +1104,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c goto out; MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + port_cap->link_state = (field & 0x80) >> 7; port_cap->supported_port_types = field & 3; port_cap->suggested_type = (field >> 3) & 1; port_cap->default_sense = (field >> 4) & 1; @@ -1310,6 +1311,15 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, port_type |= MLX4_PORT_LINK_UP_MASK; else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) port_type &= ~MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) { + int other_port = (port == 1) ? 2 : 1; + struct mlx4_port_cap port_cap; + + err = mlx4_QUERY_PORT(dev, other_port, &port_cap); + if (err) + goto out; + port_type |= (port_cap.link_state << 7); + } MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); @@ -1325,7 +1335,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_PKEY_OFFSET); } - +out: return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 08de5555c2f4..7ea258af636a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -44,6 +44,7 @@ struct mlx4_mod_stat_cfg { }; struct mlx4_port_cap { + u8 link_state; u8 supported_port_types; u8 suggested_type; u8 default_sense; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 31c491e02e69..f1b6d219e445 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1221,6 +1221,76 @@ err_set_port: return err ? 
err : count; } +/* bond for multi-function device */ +#define MAX_MF_BOND_ALLOWED_SLAVES 63 +static int mlx4_mf_bond(struct mlx4_dev *dev) +{ + int err = 0; + struct mlx4_slaves_pport slaves_port1; + struct mlx4_slaves_pport slaves_port2; + DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); + + slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); + slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); + bitmap_and(slaves_port_1_2, + slaves_port1.slaves, slaves_port2.slaves, + dev->persist->num_vfs + 1); + + /* only single port vfs are allowed */ + if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { + mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); + return -EINVAL; + } + + /* limit on maximum allowed VFs */ + if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + + bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > + MAX_MF_BOND_ALLOWED_SLAVES) + return -EINVAL; + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); + return -EINVAL; + } + + err = mlx4_bond_mac_table(dev); + if (err) + return err; + err = mlx4_bond_vlan_table(dev); + if (err) + goto err1; + err = mlx4_bond_fs_rules(dev); + if (err) + goto err2; + + return 0; +err2: + (void)mlx4_unbond_vlan_table(dev); +err1: + (void)mlx4_unbond_mac_table(dev); + return err; +} + +static int mlx4_mf_unbond(struct mlx4_dev *dev) +{ + int ret, ret1; + + ret = mlx4_unbond_fs_rules(dev); + if (ret) + mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); + ret1 = mlx4_unbond_mac_table(dev); + if (ret1) { + mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); + ret = ret1; + } + ret1 = mlx4_unbond_vlan_table(dev); + if (ret1) { + mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); + ret = ret1; + } + return ret; +} + int mlx4_bond(struct mlx4_dev *dev) { int ret = 0; @@ -1228,16 +1298,23 @@ int mlx4_bond(struct mlx4_dev *dev) mutex_lock(&priv->bond_mutex); - if (!mlx4_is_bonded(dev)) + if (!mlx4_is_bonded(dev)) { ret = mlx4_do_bond(dev, true); - else - ret = 0; + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + if (!ret && mlx4_is_master(dev)) { + ret = mlx4_mf_bond(dev); + if (ret) { + mlx4_err(dev, "bond for multifunction failed\n"); + mlx4_do_bond(dev, false); + } + } + } mutex_unlock(&priv->bond_mutex); - if (ret) - mlx4_err(dev, "Failed to bond device: %d\n", ret); - else + if (!ret) mlx4_dbg(dev, "Device is bonded\n"); + return ret; } EXPORT_SYMBOL_GPL(mlx4_bond); @@ -1249,14 +1326,24 @@ int mlx4_unbond(struct mlx4_dev *dev) mutex_lock(&priv->bond_mutex); - if (mlx4_is_bonded(dev)) + if (mlx4_is_bonded(dev)) { + int ret2 = 0; + ret = mlx4_do_bond(dev, false); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + if (mlx4_is_master(dev)) + ret2 = mlx4_mf_unbond(dev); + if (ret2) { + mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); + ret = ret2; + } + } mutex_unlock(&priv->bond_mutex); - if (ret) - mlx4_err(dev, "Failed to unbond device: %d\n", ret); - else + if (!ret) mlx4_dbg(dev, "Device is unbonded\n"); + return ret; } EXPORT_SYMBOL_GPL(mlx4_unbond); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e1cf9036af22..2404c22ad2b2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -736,6 +736,7 @@ struct mlx4_catas_err { struct mlx4_mac_table { __be64 entries[MLX4_MAX_MAC_NUM]; int 
refs[MLX4_MAX_MAC_NUM]; + bool is_dup[MLX4_MAX_MAC_NUM]; struct mutex mutex; int total; int max; @@ -758,6 +759,7 @@ struct mlx4_roce_gid_table { struct mlx4_vlan_table { __be32 entries[MLX4_MAX_VLAN_NUM]; int refs[MLX4_MAX_VLAN_NUM]; + int is_dup[MLX4_MAX_VLAN_NUM]; struct mutex mutex; int total; int max; @@ -1225,6 +1227,10 @@ void mlx4_init_roce_gid_table(struct mlx4_dev *dev, struct mlx4_roce_gid_table *table); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); +int mlx4_bond_vlan_table(struct mlx4_dev *dev); +int mlx4_unbond_vlan_table(struct mlx4_dev *dev); +int mlx4_bond_mac_table(struct mlx4_dev *dev); +int mlx4_unbond_mac_table(struct mlx4_dev *dev); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); /* resource tracker functions*/ @@ -1385,6 +1391,8 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); int mlx4_config_mad_demux(struct mlx4_dev *dev); int mlx4_do_bond(struct mlx4_dev *dev, bool enable); +int mlx4_bond_fs_rules(struct mlx4_dev *dev); +int mlx4_unbond_fs_rules(struct mlx4_dev *dev); enum mlx4_zone_flags { MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c41f15102ae0..35de7d2e6b34 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -320,11 +320,6 @@ struct mlx4_en_rx_ring { void *rx_info; unsigned long bytes; unsigned long packets; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned long yields; - unsigned long misses; - unsigned long cleaned; -#endif unsigned long csum_ok; unsigned long csum_none; unsigned long csum_complete; @@ -347,18 +342,6 @@ struct mlx4_en_cq { struct mlx4_cqe *buf; #define MLX4_EN_OPCODE_ERROR 0x1e -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define MLX4_EN_CQ_STATE_IDLE 0 -#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ -#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ -#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) -#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ -#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ -#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) -#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) - spinlock_t poll_lock; /* protects from LLS/napi conflicts */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ struct irq_desc *irq_desc; }; @@ -622,115 +605,6 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) return buf + idx * cqe_sz; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) -{ - spin_lock_init(&cq->poll_lock); - cq->state = MLX4_EN_CQ_STATE_IDLE; -} - -/* called from the device poll rutine to get ownership of a cq */ -static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) -{ - int rc = true; - spin_lock(&cq->poll_lock); - if (cq->state & MLX4_CQ_LOCKED) { - WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); - cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; - rc = false; - } else - /* we don't care if someone yielded */ - cq->state = MLX4_EN_CQ_STATE_NAPI; - spin_unlock(&cq->poll_lock); - return rc; -} - -/* returns true is someone tried to get the cq while napi had it */ -static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) -{ - int rc = false; - 
spin_lock(&cq->poll_lock); - WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | - MLX4_EN_CQ_STATE_NAPI_YIELD)); - - if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) - rc = true; - cq->state = MLX4_EN_CQ_STATE_IDLE; - spin_unlock(&cq->poll_lock); - return rc; -} - -/* called from mlx4_en_low_latency_poll() */ -static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) -{ - int rc = true; - spin_lock_bh(&cq->poll_lock); - if ((cq->state & MLX4_CQ_LOCKED)) { - struct net_device *dev = cq->dev; - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; - - cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; - rc = false; - rx_ring->yields++; - } else - /* preserve yield marks */ - cq->state |= MLX4_EN_CQ_STATE_POLL; - spin_unlock_bh(&cq->poll_lock); - return rc; -} - -/* returns true if someone tried to get the cq while it was locked */ -static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) -{ - int rc = false; - spin_lock_bh(&cq->poll_lock); - WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); - - if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) - rc = true; - cq->state = MLX4_EN_CQ_STATE_IDLE; - spin_unlock_bh(&cq->poll_lock); - return rc; -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) -{ - WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); - return cq->state & CQ_USER_PEND; -} -#else -static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) -{ -} - -static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) -{ - return true; -} - -static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) -{ - return false; -} - -static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_update_loopback_state(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index c2b21313dba7..f2550425c251 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -61,6 +61,7 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { table->entries[i] = 0; table->refs[i] = 0; + table->is_dup[i] = false; } table->max = 1 << dev->caps.log_num_macs; table->total = 0; @@ -74,6 +75,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { table->entries[i] = 0; table->refs[i] = 0; + table->is_dup[i] = false; } table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; table->total = 0; @@ -159,21 +161,94 @@ int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) } EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); +static bool mlx4_need_mf_bond(struct mlx4_dev *dev) +{ + int i, num_eth_ports = 0; + + if (!mlx4_is_mfunc(dev)) + return false; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + ++num_eth_ports; + + return (num_eth_ports == 2) ? 
true : false; +} + int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; int i, err = 0; int free = -1; + int free_for_dup = -1; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; + bool need_mf_bond = mlx4_need_mf_bond(dev); + bool can_mf_bond = true; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n", + (unsigned long long)mac, port, + dup ? "with" : "without"); + + if (need_mf_bond) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } + + if (need_mf_bond) { + int index_at_port = -1; + int index_at_dup_port = -1; - mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", - (unsigned long long) mac, port); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))) + index_at_port = i; + if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i])))) + index_at_dup_port = i; + } + + /* check that same mac is not in the tables at different indices */ + if ((index_at_port != index_at_dup_port) && + (index_at_port >= 0) && + (index_at_dup_port >= 0)) + can_mf_bond = false; + + /* If the mac is already in the primary table, the slot must be + * available in the duplicate table as well. + */ + if (index_at_port >= 0 && index_at_dup_port < 0 && + dup_table->refs[index_at_port]) { + can_mf_bond = false; + } + /* If the mac is already in the duplicate table, check that the + * corresponding index is not occupied in the primary table, or + * the primary table already contains the mac at the same index. + * Otherwise, you cannot bond (primary contains a different mac + * at that index). 
+ */ + if (index_at_dup_port >= 0) { + if (!table->refs[index_at_dup_port] || + ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port])))) + free_for_dup = index_at_dup_port; + else + can_mf_bond = false; + } + } - mutex_lock(&table->mutex); for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { if (!table->refs[i]) { if (free < 0) free = i; + if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { + if (!dup_table->refs[i]) + free_for_dup = i; + } continue; } @@ -182,10 +257,30 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) /* MAC already registered, increment ref count */ err = i; ++table->refs[i]; + if (dup) { + u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]); + + if (dup_mac != mac || !dup_table->is_dup[i]) { + mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n", + mac, dup_port, i); + } + } goto out; } } + if (need_mf_bond && (free_for_dup < 0)) { + if (dup) { + mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n"); + mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n"); + dup = false; + } + can_mf_bond = false; + } + + if (need_mf_bond && can_mf_bond) + free = free_for_dup; + mlx4_dbg(dev, "Free MAC index is %d\n", free); if (table->total == table->max) { @@ -205,10 +300,35 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) goto out; } table->refs[free] = 1; - err = free; + table->is_dup[free] = false; ++table->total; + if (dup) { + dup_table->refs[free] = 0; + dup_table->is_dup[free] = true; + dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac); + dup_table->is_dup[free] = false; + dup_table->entries[free] = 0; + goto out; + } + ++dup_table->total; + } + err = free; out: - mutex_unlock(&table->mutex); + if (need_mf_bond) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } EXPORT_SYMBOL_GPL(__mlx4_register_mac); @@ -255,6 +375,9 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) struct mlx4_port_info *info; struct mlx4_mac_table *table; int index; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; if (port < 1 || port > dev->caps.num_ports) { mlx4_warn(dev, "invalid port number (%d), aborting...\n", port); @@ -262,22 +385,59 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) } info = &mlx4_priv(dev)->port[port]; table = &info->mac_table; - mutex_lock(&table->mutex); + + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } + index = find_index(dev, table, mac); if (validate_index(dev, table, index)) goto out; - if (--table->refs[index]) { + + if (--table->refs[index] || table->is_dup[index]) { mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", index); + if (!table->refs[index]) + dup_table->is_dup[index] = false; goto out; } table->entries[index] = 0; - mlx4_set_port_mac_table(dev, port, table->entries); + if (mlx4_set_port_mac_table(dev, port, table->entries)) + mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port); --table->total; + + if (dup) { + dup_table->is_dup[index] = false; + if (dup_table->refs[index]) + goto out; + dup_table->entries[index] = 0; + if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries)) + mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port); + + --table->total; + } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } } EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); @@ -311,9 +471,22 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) struct mlx4_mac_table *table = &info->mac_table; int index = qpn - info->base_qpn; int err = 0; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table; /* CX1 doesn't support multi-functions */ - mutex_lock(&table->mutex); + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } err = validate_index(dev, table, index); if (err) @@ -326,9 +499,30 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); table->entries[index] = 0; + } else { + if (dup) { + dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n", + (unsigned long long)new_mac); + dup_table->entries[index] = 0; + } + } } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } EXPORT_SYMBOL_GPL(__mlx4_replace_mac); @@ -380,8 +574,28 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int i, err = 0; int free = -1; - - mutex_lock(&table->mutex); + int free_for_dup = -1; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 2 : 1; + struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table; + bool need_mf_bond = mlx4_need_mf_bond(dev); + bool can_mf_bond = true; + + mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n", + vlan, port, + dup ? "with" : "without"); + + if (need_mf_bond) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } if (table->total == table->max) { /* No free vlan entries */ @@ -389,22 +603,85 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, goto out; } + if (need_mf_bond) { + int index_at_port = -1; + int index_at_dup_port = -1; + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) + index_at_port = i; + if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))) + index_at_dup_port = i; + } + /* check that same vlan is not in the tables at different indices */ + if ((index_at_port != index_at_dup_port) && + (index_at_port >= 0) && + (index_at_dup_port >= 0)) + can_mf_bond = false; + + /* If the vlan is already in the primary table, the slot must be + * available in the duplicate table as well. + */ + if (index_at_port >= 0 && index_at_dup_port < 0 && + dup_table->refs[index_at_port]) { + can_mf_bond = false; + } + /* If the vlan is already in the duplicate table, check that the + * corresponding index is not occupied in the primary table, or + * the primary table already contains the vlan at the same index. + * Otherwise, you cannot bond (primary contains a different vlan + * at that index). 
+ */ + if (index_at_dup_port >= 0) { + if (!table->refs[index_at_dup_port] || + (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port])))) + free_for_dup = index_at_dup_port; + else + can_mf_bond = false; + } + } + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { - if (free < 0 && (table->refs[i] == 0)) { - free = i; - continue; + if (!table->refs[i]) { + if (free < 0) + free = i; + if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { + if (!dup_table->refs[i]) + free_for_dup = i; + } } - if (table->refs[i] && + if ((table->refs[i] || table->is_dup[i]) && (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) { /* Vlan already registered, increase references count */ + mlx4_dbg(dev, "vlan %u is already registered.\n", vlan); *index = i; ++table->refs[i]; + if (dup) { + u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]); + + if (dup_vlan != vlan || !dup_table->is_dup[i]) { + mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n", + vlan, dup_port, i); + } + } goto out; } } + if (need_mf_bond && (free_for_dup < 0)) { + if (dup) { + mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n"); + mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n"); + dup = false; + } + can_mf_bond = false; + } + + if (need_mf_bond && can_mf_bond) + free = free_for_dup; + if (free < 0) { err = -ENOMEM; goto out; @@ -412,6 +689,7 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, /* Register new VLAN */ table->refs[free] = 1; + table->is_dup[free] = false; table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); err = mlx4_set_port_vlan_table(dev, port, table->entries); @@ -421,11 +699,35 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, table->entries[free] = 0; goto out; } + ++table->total; + if (dup) { + dup_table->refs[free] = 0; + dup_table->is_dup[free] = true; + dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan); + dup_table->is_dup[free] = false; + dup_table->entries[free] = 0; + goto out; + } + ++dup_table->total; + } *index = free; - ++table->total; out: - mutex_unlock(&table->mutex); + if (need_mf_bond) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } return err; } @@ -455,8 +757,22 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; int index; + bool dup = mlx4_is_mf_bonded(dev); + u8 dup_port = (port == 1) ? 
2 : 1; + struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table; + + if (dup) { + if (port == 1) { + mutex_lock(&table->mutex); + mutex_lock(&dup_table->mutex); + } else { + mutex_lock(&dup_table->mutex); + mutex_lock(&table->mutex); + } + } else { + mutex_lock(&table->mutex); + } - mutex_lock(&table->mutex); if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); goto out; @@ -467,16 +783,38 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) goto out; } - if (--table->refs[index]) { + if (--table->refs[index] || table->is_dup[index]) { mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", table->refs[index], index); + if (!table->refs[index]) + dup_table->is_dup[index] = false; goto out; } table->entries[index] = 0; - mlx4_set_port_vlan_table(dev, port, table->entries); + if (mlx4_set_port_vlan_table(dev, port, table->entries)) + mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port); --table->total; + if (dup) { + dup_table->is_dup[index] = false; + if (dup_table->refs[index]) + goto out; + dup_table->entries[index] = 0; + if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries)) + mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port); + --dup_table->total; + } out: - mutex_unlock(&table->mutex); + if (dup) { + if (port == 2) { + mutex_unlock(&table->mutex); + mutex_unlock(&dup_table->mutex); + } else { + mutex_unlock(&dup_table->mutex); + mutex_unlock(&table->mutex); + } + } else { + mutex_unlock(&table->mutex); + } } void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) @@ -495,6 +833,220 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) } EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); +int mlx4_bond_mac_table(struct mlx4_dev *dev) +{ + struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; + struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; + int ret = 0; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if ((t1->entries[i] != t2->entries[i]) && + t1->entries[i] && t2->entries[i]) { + mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (t1->entries[i] && !t2->entries[i]) { + t2->entries[i] = t1->entries[i]; + t2->is_dup[i] = true; + update2 = true; + } else if (!t1->entries[i] && t2->entries[i]) { + t1->entries[i] = t2->entries[i]; + t1->is_dup[i] = true; + update1 = true; + } else if (t1->entries[i] && t2->entries[i]) { + t1->is_dup[i] = true; + t2->is_dup[i] = true; + } + } + + if (update1) { + ret = mlx4_set_port_mac_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret); + } + if (!ret && update2) { + ret = mlx4_set_port_mac_table(dev, 2, t2->entries); + if (ret) + mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret); + } + + if (ret) + mlx4_warn(dev, "failed to create mirror MAC tables\n"); +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_unbond_mac_table(struct mlx4_dev *dev) +{ + struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; + struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; + int ret = 0; + int ret1; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + 
mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (t1->entries[i] != t2->entries[i]) { + mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n"); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!t1->entries[i]) + continue; + t1->is_dup[i] = false; + if (!t1->refs[i]) { + t1->entries[i] = 0; + update1 = true; + } + t2->is_dup[i] = false; + if (!t2->refs[i]) { + t2->entries[i] = 0; + update2 = true; + } + } + + if (update1) { + ret = mlx4_set_port_mac_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret); + } + if (update2) { + ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries); + if (ret1) { + mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1); + ret = ret1; + } + } +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_bond_vlan_table(struct mlx4_dev *dev) +{ + struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table; + struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table; + int ret = 0; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if ((t1->entries[i] != t2->entries[i]) && + t1->entries[i] && t2->entries[i]) { + mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (t1->entries[i] && !t2->entries[i]) { + t2->entries[i] = t1->entries[i]; + t2->is_dup[i] = true; + update2 = true; + } else if (!t1->entries[i] && t2->entries[i]) { + t1->entries[i] = t2->entries[i]; + t1->is_dup[i] = true; + update1 = true; + } else if (t1->entries[i] && t2->entries[i]) { + t1->is_dup[i] = true; + t2->is_dup[i] = true; + } + } + + if (update1) { + ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret); + } + if (!ret && update2) { + ret = mlx4_set_port_vlan_table(dev, 2, t2->entries); + if (ret) + mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret); + } + + if (ret) + mlx4_warn(dev, "failed to create mirror VLAN tables\n"); +unlock: + mutex_unlock(&t2->mutex); + mutex_unlock(&t1->mutex); + return ret; +} + +int mlx4_unbond_vlan_table(struct mlx4_dev *dev) +{ + struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table; + struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table; + int ret = 0; + int ret1; + int i; + bool update1 = false; + bool update2 = false; + + mutex_lock(&t1->mutex); + mutex_lock(&t2->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (t1->entries[i] != t2->entries[i]) { + mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n"); + ret = -EINVAL; + goto unlock; + } + } + + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + if (!t1->entries[i]) + continue; + t1->is_dup[i] = false; + if (!t1->refs[i]) { + t1->entries[i] = 0; + update1 = true; + } + t2->is_dup[i] = false; + if (!t2->refs[i]) { + t2->entries[i] = 0; + update2 = true; + } + } + + if (update1) { + ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); + if (ret) + mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret); + } + if (update2) { + ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries); + if (ret1) { + mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1); + ret = ret1; + } + } +unlock: + mutex_unlock(&t2->mutex); + 
mutex_unlock(&t1->mutex); + return ret; +} + int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) { struct mlx4_cmd_mailbox *inmailbox, *outmailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index cad6c44df91c..b46dbe29ef6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -222,6 +222,13 @@ enum res_fs_rule_states { struct res_fs_rule { struct res_common com; int qpn; + /* VF DMFS mbox with port flipped */ + void *mirr_mbox; + /* > 0 --> apply mirror when getting into HA mode */ + /* = 0 --> un-apply mirror when getting out of HA mode */ + u32 mirr_mbox_size; + struct list_head mirr_list; + u64 mirr_rule_id; }; static void *res_tracker_lookup(struct rb_root *root, u64 res_id) @@ -4284,6 +4291,22 @@ err_mac: return err; } +static u32 qp_attach_mbox_size(void *mbox) +{ + u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl); + struct _rule_hw *rule_header; + + rule_header = (struct _rule_hw *)(mbox + size); + + while (rule_header->size) { + size += rule_header->size * sizeof(u32); + rule_header += 1; + } + return size; +} + +static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule); + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -4300,6 +4323,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_net_trans_rule_hw_ctrl *ctrl; struct _rule_hw *rule_header; int header_id; + struct res_fs_rule *rrule; + u32 mbox_size; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -4329,7 +4354,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, case MLX4_NET_TRANS_RULE_ID_ETH: if (validate_eth_header_mac(slave, rule_header, rlist)) { err = -EINVAL; - goto err_put; + goto err_put_qp; } break; case MLX4_NET_TRANS_RULE_ID_IB: @@ -4340,7 +4365,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; - goto err_put; + goto err_put_qp; } vhcr->in_modifier += sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; @@ -4348,7 +4373,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, default: pr_err("Corrupted mailbox\n"); err = -EINVAL; - goto err_put; + goto err_put_qp; } execute: @@ -4357,23 +4382,69 @@ execute: MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - goto err_put; + goto err_put_qp; + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { mlx4_err(dev, "Fail to add flow steering resources\n"); - /* detach rule*/ + goto err_detach; + } + + err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule); + if (err) + goto err_detach; + + mbox_size = qp_attach_mbox_size(inbox->buf); + rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL); + if (!rrule->mirr_mbox) { + err = -ENOMEM; + goto err_put_rule; + } + rrule->mirr_mbox_size = mbox_size; + rrule->mirr_rule_id = 0; + memcpy(rrule->mirr_mbox, inbox->buf, mbox_size); + + /* set different port */ + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox; + if (ctrl->port == 1) + ctrl->port = 2; + else + ctrl->port = 1; + + if (mlx4_is_bonded(dev)) + mlx4_do_mirror_rule(dev, rrule); + + atomic_inc(&rqp->ref_count); + +err_put_rule: + put_res(dev, 
slave, vhcr->out_param, RES_FS_RULE); +err_detach: + /* detach rule on error */ + if (err) mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); - goto err_put; - } - atomic_inc(&rqp->ref_count); -err_put: +err_put_qp: put_res(dev, slave, qpn, RES_QP); return err; } +static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) +{ + int err; + + err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0); + if (err) { + mlx4_err(dev, "Fail to remove flow steering resources\n"); + return err; + } + + mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + return 0; +} + int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -4383,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, int err; struct res_qp *rqp; struct res_fs_rule *rrule; + u64 mirr_reg_id; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -4391,12 +4463,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); if (err) return err; + + if (!rrule->mirr_mbox) { + mlx4_err(dev, "Mirror rules cannot be removed explicitly\n"); + put_res(dev, slave, vhcr->in_param, RES_FS_RULE); + return -EINVAL; + } + mirr_reg_id = rrule->mirr_rule_id; + kfree(rrule->mirr_mbox); + /* Release the rule form busy state before removal */ put_res(dev, slave, vhcr->in_param, RES_FS_RULE); err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); if (err) return err; + if (mirr_reg_id && mlx4_is_bonded(dev)) { + err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule); + if (err) { + mlx4_err(dev, "Fail to get resource of mirror rule\n"); + } else { + put_res(dev, slave, mirr_reg_id, RES_FS_RULE); + mlx4_undo_mirror_rule(dev, rrule); + } + } err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { mlx4_err(dev, "Fail to remove flow steering resources\n"); @@ -4834,6 +4924,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) spin_unlock_irq(mlx4_tlock(dev)); } +static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + struct res_fs_rule *mirr_rule; + u64 reg_id; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + if (!fs_rule->mirr_mbox) { + mlx4_err(dev, "rule mirroring mailbox is null\n"); + return -EINVAL; + } + memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); + err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (err) + goto err; + + err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn); + if (err) + goto err_detach; + + err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule); + if (err) + goto err_rem; + + fs_rule->mirr_rule_id = reg_id; + mirr_rule->mirr_rule_id = 0; + mirr_rule->mirr_mbox_size = 0; + mirr_rule->mirr_mbox = NULL; + put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE); + + return 0; +err_rem: + rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0); +err_detach: + mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +err: + return err; +}
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct rb_root *root = &tracker->res_tree[RES_FS_RULE]; + struct rb_node *p; + struct res_fs_rule *fs_rule; + int err = 0; + LIST_HEAD(mirr_list); + + for (p = rb_first(root); p; p = rb_next(p)) { + fs_rule = rb_entry(p, struct res_fs_rule, com.node); + if ((bond && fs_rule->mirr_mbox_size) || + (!bond && !fs_rule->mirr_mbox_size)) + list_add_tail(&fs_rule->mirr_list, &mirr_list); + } + + list_for_each_entry(fs_rule, &mirr_list, mirr_list) { + if (bond) + err += mlx4_do_mirror_rule(dev, fs_rule); + else + err += mlx4_undo_mirror_rule(dev, fs_rule); + } + return err; +} + +int mlx4_bond_fs_rules(struct mlx4_dev *dev) +{ + return mlx4_mirror_fs_rules(dev, true); +} + +int mlx4_unbond_fs_rules(struct mlx4_dev *dev) +{ + return mlx4_mirror_fs_rules(dev, false); +} + static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 158c88c69ef9..c503ea05e742 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -13,6 +13,7 @@ config MLX5_CORE config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + select PTP_1588_CLOCK default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 26a68b8af2c5..01c0256effb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \ - en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ + en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ + en_txrx.o en_clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 22e72bf1ae48..9ea49a893323 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -32,6 +32,9 @@ #include <linux/if_vlan.h> #include <linux/etherdevice.h> +#include <linux/timecounter.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/cq.h> @@ -64,6 +67,8 @@ #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 +#define MLX5E_NUM_MAIN_GROUPS 9 + static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ "rx_packets", @@ -282,6 +287,19 @@ struct mlx5e_params { u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; }; +struct mlx5e_tstamp { + rwlock_t lock; + struct cyclecounter cycles; + struct timecounter clock; + struct hwtstamp_config hwtstamp_config; + u32 nominal_c_mult; + unsigned long overflow_period; + struct delayed_work overflow_work; + struct mlx5_core_dev *mdev; + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; +}; + enum { 
MLX5E_RQ_STATE_POST_WQES_ENABLE, }; @@ -313,6 +331,7 @@ struct mlx5e_rq { struct device *pdev; struct net_device *netdev; + struct mlx5e_tstamp *tstamp; struct mlx5e_rq_stats stats; struct mlx5e_cq cq; @@ -326,14 +345,12 @@ struct mlx5e_rq { struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; -struct mlx5e_tx_skb_cb { +struct mlx5e_tx_wqe_info { u32 num_bytes; u8 num_wqebbs; u8 num_dma; }; -#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) - enum mlx5e_dma_map_type { MLX5E_DMA_MAP_SINGLE, MLX5E_DMA_MAP_PAGE @@ -369,6 +386,7 @@ struct mlx5e_sq { /* pointers to per packet info: write@xmit, read@completion */ struct sk_buff **skb; struct mlx5e_sq_dma *dma_fifo; + struct mlx5e_tx_wqe_info *wqe_info; /* read only */ struct mlx5_wq_cyc wq; @@ -381,6 +399,7 @@ struct mlx5e_sq { u16 max_inline; u16 edge; struct device *pdev; + struct mlx5e_tstamp *tstamp; __be32 mkey_be; unsigned long state; @@ -442,7 +461,7 @@ enum mlx5e_rqt_ix { struct mlx5e_eth_addr_info { u8 addr[ETH_ALEN + 2]; u32 tt_vec; - u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ + struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT]; }; #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) @@ -465,15 +484,23 @@ enum { }; struct mlx5e_vlan_db { - u32 active_vlans_ft_ix[VLAN_N_VID]; - u32 untagged_rule_ft_ix; - u32 any_vlan_rule_ft_ix; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; + struct mlx5_flow_rule *untagged_rule; + struct mlx5_flow_rule *any_vlan_rule; bool filter_disabled; }; struct mlx5e_flow_table { - void *vlan; - void *main; + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + +struct mlx5e_flow_tables { + struct mlx5_flow_namespace *ns; + struct mlx5e_flow_table vlan; + struct mlx5e_flow_table main; }; struct mlx5e_priv { @@ -496,7 +523,7 @@ struct mlx5e_priv { u32 rqtn[MLX5E_NUM_RQT]; u32 tirn[MLX5E_NUM_TT]; - struct mlx5e_flow_table ft; + struct mlx5e_flow_tables fts; struct mlx5e_eth_addr_db eth_addr; struct mlx5e_vlan_db vlan; @@ -509,6 +536,7 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; + struct mlx5e_tstamp tstamp; }; #define MLX5E_NET_IP_ALIGN 2 @@ -564,7 +592,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); -bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); @@ -575,6 +603,13 @@ void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); +void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, + struct skb_shared_hwtstamps *hwts); +void mlx5e_timestamp_init(struct mlx5e_priv *priv); +void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); +int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); +int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); + int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c new file mode 
100644 index 000000000000..be6543570b2b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/clocksource.h> +#include "en.h" + +enum { + MLX5E_CYCLES_SHIFT = 23 +}; + +void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, + struct skb_shared_hwtstamps *hwts) +{ + u64 nsec; + + read_lock(&tstamp->lock); + nsec = timecounter_cyc2time(&tstamp->clock, timestamp); + read_unlock(&tstamp->lock); + + hwts->hwtstamp = ns_to_ktime(nsec); +} + +static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc) +{ + struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, + cycles); + + return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; +} + +static void mlx5e_timestamp_overflow(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, + overflow_work); + + write_lock(&tstamp->lock); + timecounter_read(&tstamp->clock); + write_unlock(&tstamp->lock); + schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); +} + +int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case 
HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0; +} + +static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns = timespec64_to_ns(ts); + + write_lock(&tstamp->lock); + timecounter_init(&tstamp->clock, &tstamp->cycles, ns); + write_unlock(&tstamp->lock); + + return 0; +} + +static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns; + + write_lock(&tstamp->lock); + ns = timecounter_read(&tstamp->clock); + write_unlock(&tstamp->lock); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + + write_lock(&tstamp->lock); + timecounter_adjtime(&tstamp->clock, delta); + write_unlock(&tstamp->lock); + + return 0; +} + +static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff; + int neg_adj = 0; + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + adj = tstamp->nominal_c_mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock(&tstamp->lock); + timecounter_read(&tstamp->clock); + tstamp->cycles.mult = neg_adj ? 
tstamp->nominal_c_mult - diff : + tstamp->nominal_c_mult + diff; + write_unlock(&tstamp->lock); + + return 0; +} + +static const struct ptp_clock_info mlx5e_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = mlx5e_ptp_adjfreq, + .adjtime = mlx5e_ptp_adjtime, + .gettime64 = mlx5e_ptp_gettime, + .settime64 = mlx5e_ptp_settime, + .enable = NULL, +}; + +static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) +{ + tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +void mlx5e_timestamp_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + u64 ns; + u64 frac = 0; + u32 dev_freq; + + mlx5e_timestamp_init_config(tstamp); + dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz); + if (!dev_freq) { + mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return; + } + rwlock_init(&tstamp->lock); + tstamp->cycles.read = mlx5e_read_internal_timer; + tstamp->cycles.shift = MLX5E_CYCLES_SHIFT; + tstamp->cycles.mult = clocksource_khz2mult(dev_freq, + tstamp->cycles.shift); + tstamp->nominal_c_mult = tstamp->cycles.mult; + tstamp->cycles.mask = CLOCKSOURCE_MASK(41); + tstamp->mdev = priv->mdev; + + timecounter_init(&tstamp->clock, &tstamp->cycles, + ktime_to_ns(ktime_get_real())); + + /* Calculate period in seconds to call the overflow watchdog - to make + * sure counter is checked at least once every wrap around. + */ + ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, + frac, &frac); + do_div(ns, NSEC_PER_SEC / 2 / HZ); + tstamp->overflow_period = ns; + + INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); + if (tstamp->overflow_period) + schedule_delayed_work(&tstamp->overflow_work, 0); + else + mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); + + /* Configure the PHC */ + tstamp->ptp_info = mlx5e_ptp_clock_info; + snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, + &priv->mdev->pdev->dev); + if (IS_ERR_OR_NULL(tstamp->ptp)) { + mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n", + PTR_ERR(tstamp->ptp)); + tstamp->ptp = NULL; + } +} + +void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return; + + if (priv->tstamp.ptp) { + ptp_clock_unregister(priv->tstamp.ptp); + priv->tstamp.ptp = NULL; + } + + cancel_delayed_work_sync(&tstamp->overflow_work); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2e022e900939..65624ac65b4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -855,6 +855,35 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, return err; } +static int mlx5e_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + info->phc_index = priv->tstamp.ptp ? 
+ ptp_clock_index(priv->tstamp.ptp) : -1; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return 0; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | + (BIT(1) << HWTSTAMP_TX_ON); + + info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | + (BIT(1) << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -878,4 +907,5 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, + .get_ts_info = mlx5e_get_ts_info, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c deleted file mode 100644 index 22d603f78273..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ /dev/null @@ -1,907 +0,0 @@ -/* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <linux/list.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/tcp.h> -#include <linux/mlx5/flow_table.h> -#include "en.h" - -enum { - MLX5E_FULLMATCH = 0, - MLX5E_ALLMULTI = 1, - MLX5E_PROMISC = 2, -}; - -enum { - MLX5E_UC = 0, - MLX5E_MC_IPV4 = 1, - MLX5E_MC_IPV6 = 2, - MLX5E_MC_OTHER = 3, -}; - -enum { - MLX5E_ACTION_NONE = 0, - MLX5E_ACTION_ADD = 1, - MLX5E_ACTION_DEL = 2, -}; - -struct mlx5e_eth_addr_hash_node { - struct hlist_node hlist; - u8 action; - struct mlx5e_eth_addr_info ai; -}; - -static inline int mlx5e_hash_eth_addr(u8 *addr) -{ - return addr[5]; -} - -static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) -{ - struct mlx5e_eth_addr_hash_node *hn; - int ix = mlx5e_hash_eth_addr(addr); - int found = 0; - - hlist_for_each_entry(hn, &hash[ix], hlist) - if (ether_addr_equal_64bits(hn->ai.addr, addr)) { - found = 1; - break; - } - - if (found) { - hn->action = MLX5E_ACTION_NONE; - return; - } - - hn = kzalloc(sizeof(*hn), GFP_ATOMIC); - if (!hn) - return; - - ether_addr_copy(hn->ai.addr, addr); - hn->action = MLX5E_ACTION_ADD; - - hlist_add_head(&hn->hlist, &hash[ix]); -} - -static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) -{ - hlist_del(&hn->hlist); - kfree(hn); -} - -static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai) -{ - void *ft = priv->ft.main; - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) - mlx5_del_flow_table_entry(ft, - ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); - - if (ai->tt_vec & BIT(MLX5E_TT_ANY)) - mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); -} - -static int mlx5e_get_eth_addr_type(u8 *addr) -{ - if (is_unicast_ether_addr(addr)) - return MLX5E_UC; - - if ((addr[0] == 0x01) && - (addr[1] == 0x00) && - (addr[2] == 0x5e) && - !(addr[3] & 0x80)) - return MLX5E_MC_IPV4; - - if ((addr[0] == 0x33) && - (addr[1] == 0x33)) - return MLX5E_MC_IPV6; - - return MLX5E_MC_OTHER; -} - -static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) -{ - int eth_addr_type; - u32 ret; - - switch (type) { - case MLX5E_FULLMATCH: - eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); - switch (eth_addr_type) { - case MLX5E_UC: - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - case 
MLX5E_MC_IPV4: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV4) | - 0; - break; - - case MLX5E_MC_IPV6: - ret = - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV6) | - 0; - break; - - case MLX5E_MC_OTHER: - ret = - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - break; - - case MLX5E_ALLMULTI: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - default: /* MLX5E_PROMISC */ - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - return ret; -} - -static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type, - void *flow_context, void *match_criteria) -{ - u8 match_criteria_enable = 0; - void *match_value; - void *dest; - u8 *dmac; - u8 *match_criteria_dmac; - void *ft = priv->ft.main; - u32 *tirn = priv->tirn; - u32 *ft_ix; - u32 tt_vec; - int err; - - match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); - dmac = MLX5_ADDR_OF(fte_match_param, match_value, - outer_headers.dmac_47_16); - match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, - outer_headers.dmac_47_16); - dest = MLX5_ADDR_OF(flow_context, flow_context, destination); - - MLX5_SET(flow_context, flow_context, action, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); - MLX5_SET(flow_context, flow_context, destination_list_size, 1); - MLX5_SET(dest_format_struct, dest, destination_type, - MLX5_FLOW_CONTEXT_DEST_TYPE_TIR); - - switch (type) { - case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - memset(match_criteria_dmac, 0xff, ETH_ALEN); - ether_addr_copy(dmac, ai->addr); - break; - - case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - match_criteria_dmac[0] = 0x01; - dmac[0] = 0x01; - break; - - case MLX5E_PROMISC: - break; - } - - tt_vec = mlx5e_get_tt_vec(ai, type); - - ft_ix = &ai->ft_ix[MLX5E_TT_ANY]; - if (tt_vec & BIT(MLX5E_TT_ANY)) { - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_ANY]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_ANY); - } - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.ethertype); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4]; - if (tt_vec & BIT(MLX5E_TT_IPV4)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6]; - if (tt_vec & BIT(MLX5E_TT_IPV6)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6); - } - - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.ip_protocol); - MLX5_SET(fte_match_param, match_value, 
outer_headers.ip_protocol, - IPPROTO_UDP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_UDP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_UDP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); - } - - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_TCP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_TCP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_TCP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); - } - - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_AH); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]; - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_IPSEC_AH]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]; - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV6_IPSEC_AH]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); - } - - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, - IPPROTO_ESP); - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]; - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IP); - MLX5_SET(dest_format_struct, dest, destination_id, - tirn[MLX5E_TT_IPV4_IPSEC_ESP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); - } - - ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]; - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, - ETH_P_IPV6); - MLX5_SET(dest_format_struct, dest, 
destination_id, - tirn[MLX5E_TT_IPV6_IPSEC_ESP]); - err = mlx5_add_flow_table_entry(ft, match_criteria_enable, - match_criteria, flow_context, - ft_ix); - if (err) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); - } - - return 0; - -err_del_ai: - mlx5e_del_eth_addr_from_flow_table(priv, ai); - - return err; -} - -static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type) -{ - u32 *flow_context; - u32 *match_criteria; - int err; - - flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + - MLX5_ST_SZ_BYTES(dest_format_struct)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!flow_context || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_eth_addr_rule_out; - } - - err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context, - match_criteria); - if (err) - netdev_err(priv->netdev, "%s: failed\n", __func__); - -add_eth_addr_rule_out: - kvfree(match_criteria); - kvfree(flow_context); - return err; -} - -enum mlx5e_vlan_rule_type { - MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, - MLX5E_VLAN_RULE_TYPE_MATCH_VID, -}; - -static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, - enum mlx5e_vlan_rule_type rule_type, u16 vid) -{ - u8 match_criteria_enable = 0; - u32 *flow_context; - void *match_value; - void *dest; - u32 *match_criteria; - u32 *ft_ix; - int err; - - flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + - MLX5_ST_SZ_BYTES(dest_format_struct)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!flow_context || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_vlan_rule_out; - } - match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); - dest = MLX5_ADDR_OF(flow_context, flow_context, destination); - - MLX5_SET(flow_context, flow_context, action, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); - MLX5_SET(flow_context, flow_context, destination_list_size, 1); - MLX5_SET(dest_format_struct, dest, destination_type, - MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE); - MLX5_SET(dest_format_struct, dest, destination_id, - mlx5_get_flow_table_id(priv->ft.main)); - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.vlan_tag); - - switch (rule_type) { - case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - ft_ix = &priv->vlan.untagged_rule_ft_ix; - break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - ft_ix = &priv->vlan.any_vlan_rule_ft_ix; - MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, - 1); - break; - default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - ft_ix = &priv->vlan.active_vlans_ft_ix[vid]; - MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, - 1); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - outer_headers.first_vid); - MLX5_SET(fte_match_param, match_value, outer_headers.first_vid, - vid); - break; - } - - err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable, - match_criteria, flow_context, ft_ix); - if (err) - netdev_err(priv->netdev, "%s: failed\n", __func__); - -add_vlan_rule_out: - kvfree(match_criteria); - kvfree(flow_context); - return err; -} - -static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, - enum mlx5e_vlan_rule_type rule_type, u16 vid) -{ - switch (rule_type) { - case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.untagged_rule_ft_ix); - break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: 
- mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.any_vlan_rule_ft_ix); - break; - case MLX5E_VLAN_RULE_TYPE_MATCH_VID: - mlx5_del_flow_table_entry(priv->ft.vlan, - priv->vlan.active_vlans_ft_ix[vid]); - break; - } -} - -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) -{ - if (!priv->vlan.filter_disabled) - return; - - priv->vlan.filter_disabled = false; - if (priv->netdev->flags & IFF_PROMISC) - return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); -} - -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) -{ - if (priv->vlan.filter_disabled) - return; - - priv->vlan.filter_disabled = true; - if (priv->netdev->flags & IFF_PROMISC) - return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); -} - -int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); -} - -int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); - - return 0; -} - -#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ - hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) - -static void mlx5e_execute_action(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_hash_node *hn) -{ - switch (hn->action) { - case MLX5E_ACTION_ADD: - mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); - hn->action = MLX5E_ACTION_NONE; - break; - - case MLX5E_ACTION_DEL: - mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); - mlx5e_del_eth_addr_from_hash(hn); - break; - } -} - -static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) -{ - struct net_device *netdev = priv->netdev; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(netdev); - - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, - priv->netdev->dev_addr); - - netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); - - netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); - - netif_addr_unlock_bh(netdev); -} - -static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) -{ - struct mlx5e_eth_addr_hash_node *hn; - struct hlist_node *tmp; - int i; - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) - mlx5e_execute_action(priv, hn); - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) - mlx5e_execute_action(priv, hn); -} - -static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) -{ - struct mlx5e_eth_addr_hash_node *hn; - struct hlist_node *tmp; - int i; - - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) - hn->action = MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) - hn->action = MLX5E_ACTION_DEL; - - if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) - mlx5e_sync_netdev_addr(priv); - - mlx5e_apply_netdev_addr(priv); -} - -void mlx5e_set_rx_mode_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - set_rx_mode_work); - - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; - struct net_device *ndev = priv->netdev; - - bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); - bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); - bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); - bool broadcast_enabled = 
rx_mode_enable; - - bool enable_promisc = !ea->promisc_enabled && promisc_enabled; - bool disable_promisc = ea->promisc_enabled && !promisc_enabled; - bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; - bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; - bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; - bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; - - if (enable_promisc) { - mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } - if (enable_allmulti) - mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); - if (enable_broadcast) - mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); - - mlx5e_handle_netdev_addr(priv); - - if (disable_broadcast) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); - if (disable_allmulti) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) { - if (!priv->vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); - } - - ea->promisc_enabled = promisc_enabled; - ea->allmulti_enabled = allmulti_enabled; - ea->broadcast_enabled = broadcast_enabled; -} - -void mlx5e_init_eth_addr(struct mlx5e_priv *priv) -{ - ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); -} - -static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) -{ - struct mlx5_flow_table_group *g; - u8 *dmac; - - g = kcalloc(9, sizeof(*g), GFP_KERNEL); - if (!g) - return -ENOMEM; - - g[0].log_sz = 3; - g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.ip_protocol); - - g[1].log_sz = 1; - g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, - outer_headers.ethertype); - - g[2].log_sz = 0; - - g[3].log_sz = 14; - g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, - outer_headers.ip_protocol); - - g[4].log_sz = 13; - g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria, - outer_headers.ethertype); - - g[5].log_sz = 11; - g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria, - outer_headers.dmac_47_16); - memset(dmac, 0xff, ETH_ALEN); - - g[6].log_sz = 2; - g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, - outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, - outer_headers.ip_protocol); - - g[7].log_sz = 1; - g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria, - 
outer_headers.ethertype); - - g[8].log_sz = 0; - g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria, - outer_headers.dmac_47_16); - dmac[0] = 0x01; - priv->ft.main = mlx5_create_flow_table(priv->mdev, 1, - MLX5_FLOW_TABLE_TYPE_NIC_RCV, - 9, g); - kfree(g); - - return priv->ft.main ? 0 : -ENOMEM; -} - -static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) -{ - mlx5_destroy_flow_table(priv->ft.main); -} - -static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) -{ - struct mlx5_flow_table_group *g; - - g = kcalloc(2, sizeof(*g), GFP_KERNEL); - if (!g) - return -ENOMEM; - - g[0].log_sz = 12; - g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, - outer_headers.first_vid); - - /* untagged + any vlan id */ - g[1].log_sz = 1; - g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, - outer_headers.vlan_tag); - - priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0, - MLX5_FLOW_TABLE_TYPE_NIC_RCV, - 2, g); - - kfree(g); - return priv->ft.vlan ? 0 : -ENOMEM; -} - -static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) -{ - mlx5_destroy_flow_table(priv->ft.vlan); -} - -int mlx5e_create_flow_tables(struct mlx5e_priv *priv) -{ - int err; - - err = mlx5e_create_main_flow_table(priv); - if (err) - return err; - - err = mlx5e_create_vlan_flow_table(priv); - if (err) - goto err_destroy_main_flow_table; - - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - if (err) - goto err_destroy_vlan_flow_table; - - return 0; - -err_destroy_vlan_flow_table: - mlx5e_destroy_vlan_flow_table(priv); - -err_destroy_main_flow_table: - mlx5e_destroy_main_flow_table(priv); - - return err; -} - -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) -{ - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - mlx5e_destroy_vlan_flow_table(priv); - mlx5e_destroy_main_flow_table(priv); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c new file mode 100644 index 000000000000..80d81abc4820 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -0,0 +1,1224 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/mlx5/fs.h> +#include "en.h" + +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + +enum { + MLX5E_FULLMATCH = 0, + MLX5E_ALLMULTI = 1, + MLX5E_PROMISC = 2, +}; + +enum { + MLX5E_UC = 0, + MLX5E_MC_IPV4 = 1, + MLX5E_MC_IPV6 = 2, + MLX5E_MC_OTHER = 3, +}; + +enum { + MLX5E_ACTION_NONE = 0, + MLX5E_ACTION_ADD = 1, + MLX5E_ACTION_DEL = 2, +}; + +struct mlx5e_eth_addr_hash_node { + struct hlist_node hlist; + u8 action; + struct mlx5e_eth_addr_info ai; +}; + +static inline int mlx5e_hash_eth_addr(u8 *addr) +{ + return addr[5]; +} + +static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +{ + struct mlx5e_eth_addr_hash_node *hn; + int ix = mlx5e_hash_eth_addr(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal_64bits(hn->ai.addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = MLX5E_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->ai.addr, addr); + hn->action = MLX5E_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai) +{ + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); + + if (ai->tt_vec & BIT(MLX5E_TT_ANY)) + mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); +} + +static int mlx5e_get_eth_addr_type(u8 *addr) +{ + if (is_unicast_ether_addr(addr)) + return MLX5E_UC; + + if ((addr[0] == 0x01) && + (addr[1] == 0x00) && + (addr[2] == 0x5e) && + !(addr[3] & 0x80)) + return MLX5E_MC_IPV4; + + if ((addr[0] == 0x33) && + (addr[1] == 0x33)) + return MLX5E_MC_IPV6; + + return MLX5E_MC_OTHER; +} + +static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) +{ + int eth_addr_type; + u32 ret; + + switch (type) { + case MLX5E_FULLMATCH: + eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); + switch (eth_addr_type) { + case MLX5E_UC: + ret = + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + 
BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + + case MLX5E_MC_IPV4: + ret = + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV4) | + 0; + break; + + case MLX5E_MC_IPV6: + ret = + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV6) | + 0; + break; + + case MLX5E_MC_OTHER: + ret = + BIT(MLX5E_TT_ANY) | + 0; + break; + } + + break; + + case MLX5E_ALLMULTI: + ret = + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + + default: /* MLX5E_PROMISC */ + ret = + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | + 0; + break; + } + + return ret; +} + +static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, + int type, u32 *mc, u32 *mv) +{ + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + struct mlx5_flow_rule **rule_p; + struct mlx5_flow_table *ft = priv->fts.main.t; + u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers.dmac_47_16); + u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, + outer_headers.dmac_47_16); + u32 *tirn = priv->tirn; + u32 tt_vec; + int err = 0; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + eth_broadcast_addr(mc_dmac); + ether_addr_copy(mv_dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + mc_dmac[0] = 0x01; + mv_dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + tt_vec = mlx5e_get_tt_vec(ai, type); + + if (tt_vec & BIT(MLX5E_TT_ANY)) { + rule_p = &ai->ft_rule[MLX5E_TT_ANY]; + dest.tir_num = tirn[MLX5E_TT_ANY]; + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_ANY); + } + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + if (tt_vec & BIT(MLX5E_TT_IPV4)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; + dest.tir_num = tirn[MLX5E_TT_IPV4]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; + dest.tir_num = tirn[MLX5E_TT_IPV6]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6); + } + + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + 
MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); + + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; + dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; + dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); + } + + MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); + + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; + dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IP); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); + } + + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { + rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; + dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; + MLX5_SET(fte_match_param, mv, outer_headers.ethertype, + ETH_P_IPV6); + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if 
(IS_ERR_OR_NULL(*rule_p)) + goto err_del_ai; + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); + } + + return 0; + +err_del_ai: + err = PTR_ERR(*rule_p); + *rule_p = NULL; + mlx5e_del_eth_addr_from_flow_table(priv, ai); + + return err; +} + +static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type) +{ + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_eth_addr_rule_out; + } + + err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria, + match_value); + +add_eth_addr_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) +{ + struct net_device *ndev = priv->netdev; + int max_list_size; + int list_size; + u16 *vlans; + int vlan; + int err; + int i; + + list_size = 0; + for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) + list_size++; + + max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); + + if (list_size > max_list_size) { + netdev_warn(ndev, + "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", + list_size, max_list_size); + list_size = max_list_size; + } + + vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); + if (!vlans) + return -ENOMEM; + + i = 0; + for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { + if (i >= list_size) + break; + vlans[i++] = vlan; + } + + err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); + if (err) + netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", + err); + + kfree(vlans); + return err; +} + +enum mlx5e_vlan_rule_type { + MLX5E_VLAN_RULE_TYPE_UNTAGGED, + MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_VID, +}; + +static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, + u16 vid, u32 *mc, u32 *mv) +{ + struct mlx5_flow_table *ft = priv->fts.vlan.t; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + struct mlx5_flow_rule **rule_p; + int err = 0; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fts.main.t; + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + rule_p = &priv->vlan.untagged_rule; + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + rule_p = &priv->vlan.any_vlan_rule; + MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ + rule_p = &priv->vlan.active_vlans_rule[vid]; + MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); + MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); + break; + } + + *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + + if (IS_ERR(*rule_p)) { + err = PTR_ERR(*rule_p); + *rule_p = NULL; + netdev_err(priv->netdev, "%s: add rule failed\n", __func__); + } + + return err; +} + +static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = 
mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_vlan_rule_out; + } + + if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) + mlx5e_vport_context_update_vlans(priv); + + err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, + match_value); + +add_vlan_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + if (priv->vlan.untagged_rule) { + mlx5_del_flow_rule(priv->vlan.untagged_rule); + priv->vlan.untagged_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + if (priv->vlan.any_vlan_rule) { + mlx5_del_flow_rule(priv->vlan.any_vlan_rule); + priv->vlan.any_vlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + mlx5e_vport_context_update_vlans(priv); + if (priv->vlan.active_vlans_rule[vid]) { + mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]); + priv->vlan.active_vlans_rule[vid] = NULL; + } + mlx5e_vport_context_update_vlans(priv); + break; + } +} + +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +{ + if (!priv->vlan.filter_disabled) + return; + + priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); +} + +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +{ + if (priv->vlan.filter_disabled) + return; + + priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + set_bit(vid, priv->vlan.active_vlans); + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + clear_bit(vid, priv->vlan.active_vlans); + + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + + return 0; +} + +#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +static void mlx5e_execute_action(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_hash_node *hn) +{ + switch (hn->action) { + case MLX5E_ACTION_ADD: + mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + hn->action = MLX5E_ACTION_NONE; + break; + + case MLX5E_ACTION_DEL: + mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); + mlx5e_del_eth_addr_from_hash(hn); + break; + } +} + +static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + priv->netdev->dev_addr); + + netdev_for_each_uc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + + netdev_for_each_mc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + + netif_addr_unlock_bh(netdev); +} + +static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, + u8 addr_array[][ETH_ALEN], int size) +{ + bool is_uc = (list_type == 
MLX5_NVPRT_LIST_TYPE_UC); + struct net_device *ndev = priv->netdev; + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_head *addr_list; + struct hlist_node *tmp; + int i = 0; + int hi; + + addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + + if (is_uc) /* Make sure our own address is pushed first */ + ether_addr_copy(addr_array[i++], ndev->dev_addr); + else if (priv->eth_addr.broadcast_enabled) + ether_addr_copy(addr_array[i++], ndev->broadcast); + + mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { + if (ether_addr_equal(ndev->dev_addr, hn->ai.addr)) + continue; + if (i >= size) + break; + ether_addr_copy(addr_array[i++], hn->ai.addr); + } +} + +static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, + int list_type) +{ + bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); + struct mlx5e_eth_addr_hash_node *hn; + u8 (*addr_array)[ETH_ALEN] = NULL; + struct hlist_head *addr_list; + struct hlist_node *tmp; + int max_size; + int size; + int err; + int hi; + + size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); + max_size = is_uc ? + 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); + + addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) + size++; + + if (size > max_size) { + netdev_warn(priv->netdev, + "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", + is_uc ? "UC" : "MC", size, max_size); + size = max_size; + } + + if (size) { + addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); + if (!addr_array) { + err = -ENOMEM; + goto out; + } + mlx5e_fill_addr_array(priv, list_type, addr_array, size); + } + + err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); +out: + if (err) + netdev_err(priv->netdev, + "Failed to modify vport %s list err(%d)\n", + is_uc ? 
"UC" : "MC", err); + kfree(addr_array); +} + +static void mlx5e_vport_context_update(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + + mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); + mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); + mlx5_modify_nic_vport_promisc(priv->mdev, 0, + ea->allmulti_enabled, + ea->promisc_enabled); +} + +static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_execute_action(priv, hn); + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_execute_action(priv, hn); +} + +static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + hn->action = MLX5E_ACTION_DEL; + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + hn->action = MLX5E_ACTION_DEL; + + if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5e_sync_netdev_addr(priv); + + mlx5e_apply_netdev_addr(priv); +} + +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct net_device *ndev = priv->netdev; + + bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); + bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool broadcast_enabled = rx_mode_enable; + + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + + if (enable_promisc) { + mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } + if (enable_allmulti) + mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + if (enable_broadcast) + mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + + mlx5e_handle_netdev_addr(priv); + + if (disable_broadcast) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + if (disable_allmulti) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } + + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; + + mlx5e_vport_context_update(priv); +} + +static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) +{ + int i; + + for (i = ft->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ft->g[i])) + mlx5_destroy_flow_group(ft->g[i]); + ft->g[i] = NULL; + } + ft->num_groups = 0; +} + +void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +{ + ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); +} + +#define MLX5E_MAIN_GROUP0_SIZE BIT(3) +#define MLX5E_MAIN_GROUP1_SIZE BIT(1) +#define 
MLX5E_MAIN_GROUP2_SIZE BIT(0) +#define MLX5E_MAIN_GROUP3_SIZE BIT(14) +#define MLX5E_MAIN_GROUP4_SIZE BIT(13) +#define MLX5E_MAIN_GROUP5_SIZE BIT(11) +#define MLX5E_MAIN_GROUP6_SIZE BIT(2) +#define MLX5E_MAIN_GROUP7_SIZE BIT(1) +#define MLX5E_MAIN_GROUP8_SIZE BIT(0) +#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ + MLX5E_MAIN_GROUP1_SIZE +\ + MLX5E_MAIN_GROUP2_SIZE +\ + MLX5E_MAIN_GROUP3_SIZE +\ + MLX5E_MAIN_GROUP4_SIZE +\ + MLX5E_MAIN_GROUP5_SIZE +\ + MLX5E_MAIN_GROUP6_SIZE +\ + MLX5E_MAIN_GROUP7_SIZE +\ + MLX5E_MAIN_GROUP8_SIZE) + +static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) +{ + u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria.outer_headers.dmac_47_16); + int err; + int ix = 0; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP0_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP4_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + eth_broadcast_addr(dmac); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP5_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP6_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP7_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + dmac[0] = 0x01; + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_MAIN_GROUP8_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + return 0; + +err_destroy_groups: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + + return err; +} + +static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) +{ + u32 *in; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + err = __mlx5e_create_main_groups(ft, in, inlen); + + kvfree(in); + return err; +} + +static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fts.main; + int err; + + ft->num_groups = 0; + ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE); + + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) { + err = -ENOMEM; + goto err_destroy_main_flow_table; + } + + err = mlx5e_create_main_groups(ft); + if (err) + goto err_free_g; + return 0; + +err_free_g: + kfree(ft->g); + +err_destroy_main_flow_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; +} + +static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) +{ + mlx5e_destroy_groups(ft); + kfree(ft->g); + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; +} + +static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_flow_table(&priv->fts.main); +} + +#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_VLAN_GROUP0_SIZE BIT(12) +#define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ + MLX5E_VLAN_GROUP1_SIZE) + +static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) +{ + int err; + int ix = 0; + u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP0_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + 
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + + return 0; + +err_destroy_groups: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + + return err; +} + +static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) +{ + u32 *in; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + err = __mlx5e_create_vlan_groups(ft, in, inlen); + + kvfree(in); + return err; +} + +static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fts.vlan; + int err; + + ft->num_groups = 0; + ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE); + + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) { + err = -ENOMEM; + goto err_destroy_vlan_flow_table; + } + + err = mlx5e_create_vlan_groups(ft); + if (err) + goto err_free_g; + + return 0; + +err_free_g: + kfree(ft->g); + +err_destroy_vlan_flow_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; +} + +static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_flow_table(&priv->fts.vlan); +} + +int mlx5e_create_flow_tables(struct mlx5e_priv *priv) +{ + int err; + + priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + + if (!priv->fts.ns) + return -EINVAL; + + err = mlx5e_create_vlan_flow_table(priv); + if (err) + return err; + + err = mlx5e_create_main_flow_table(priv); + if (err) + goto err_destroy_vlan_flow_table; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_main_flow_table; + + return 0; + +err_destroy_main_flow_table: + mlx5e_destroy_main_flow_table(priv); +err_destroy_vlan_flow_table: + mlx5e_destroy_vlan_flow_table(priv); + + return err; +} + +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + mlx5e_destroy_main_flow_table(priv); + mlx5e_destroy_vlan_flow_table(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1e52db32c73d..5c74a734f158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -30,8 +30,9 @@ * SOFTWARE. 
*/ -#include <linux/mlx5/flow_table.h> +#include <linux/mlx5/fs.h> #include "en.h" +#include "eswitch.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -63,7 +64,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv) u8 port_state; port_state = mlx5_query_vport_state(mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT); + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); if (port_state == VPORT_STATE_UP) netif_carrier_on(priv->netdev); @@ -350,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->pdev = c->pdev; rq->netdev = c->netdev; + rq->tstamp = &priv->tstamp; rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; @@ -506,6 +508,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_sq_db(struct mlx5e_sq *sq) { + kfree(sq->wqe_info); kfree(sq->dma_fifo); kfree(sq->skb); } @@ -518,8 +521,10 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa); sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL, numa); + sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL, + numa); - if (!sq->skb || !sq->dma_fifo) { + if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) { mlx5e_free_sq_db(sq); return -ENOMEM; } @@ -567,6 +572,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); sq->pdev = c->pdev; + sq->tstamp = &priv->tstamp; sq->mkey_be = c->mkey_be; sq->channel = c; sq->tc = tc; @@ -1020,6 +1026,7 @@ err_close_tx_cqs: err_napi_del: netif_napi_del(&c->napi); + napi_hash_del(&c->napi); kfree(c); return err; @@ -1033,6 +1040,10 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); netif_napi_del(&c->napi); + + napi_hash_del(&c->napi); + synchronize_rcu(); + kfree(c); } @@ -1421,6 +1432,7 @@ int mlx5e_open_locked(struct net_device *netdev) mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); + mlx5e_timestamp_init(priv); schedule_delayed_work(&priv->update_stats_work, 0); @@ -1457,6 +1469,7 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); + mlx5e_timestamp_cleanup(priv); mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); mlx5e_close_channels(priv); @@ -1926,6 +1939,91 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) return err; } +static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx5e_hwstamp_set(dev, ifr); + case SIOCGHWTSTAMP: + return mlx5e_hwstamp_get(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac); +} + +static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1, + vlan, qos); +} + +static int mlx5_vport_link2ifla(u8 esw_link) +{ + switch (esw_link) { + case MLX5_ESW_VPORT_ADMIN_STATE_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case MLX5_ESW_VPORT_ADMIN_STATE_UP: + return IFLA_VF_LINK_STATE_ENABLE; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +static int mlx5_ifla_link2vport(u8 ifla_link) +{ + switch (ifla_link) { + case IFLA_VF_LINK_STATE_DISABLE: + return 
MLX5_ESW_VPORT_ADMIN_STATE_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return MLX5_ESW_VPORT_ADMIN_STATE_UP; + } + return MLX5_ESW_VPORT_ADMIN_STATE_AUTO; +} + +static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, + int link_state) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1, + mlx5_ifla_link2vport(link_state)); +} + +static int mlx5e_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi); + if (err) + return err; + ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate); + return 0; +} + +static int mlx5e_get_vf_stats(struct net_device *dev, + int vf, struct ifla_vf_stats *vf_stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, + vf_stats); +} + static struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -1937,6 +2035,7 @@ static struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, + .ndo_do_ioctl = mlx5e_ioctl, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -2023,7 +2122,12 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr); + mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr); + if (is_zero_ether_addr(netdev->dev_addr) && + !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { + eth_hw_addr_random(netdev); + mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); + } } static void mlx5e_build_netdev(struct net_device *netdev) @@ -2036,6 +2140,14 @@ static void mlx5e_build_netdev(struct net_device *netdev) if (priv->params.num_tc > 1) mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac; + mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan; + mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config; + mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state; + mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats; + } + netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index cf0098596e85..dd959d929aad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,8 +33,14 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <net/busy_poll.h> #include "en.h" +static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) +{ + return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; +} + static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { @@ -189,6 +195,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, { struct net_device *netdev = rq->netdev; u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + struct mlx5e_tstamp *tstamp = rq->tstamp; int lro_num_seg; skb_put(skb, cqe_bcnt); @@ -201,6 +208,9 @@ static inline void 
mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, rq->stats.lro_bytes += cqe_bcnt; } + if (unlikely(mlx5e_rx_hw_stamp(tstamp))) + mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); + mlx5e_handle_csum(netdev, cqe, rq, skb); skb->protocol = eth_type_trans(skb, netdev); @@ -215,16 +225,16 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, be16_to_cpu(cqe->vlan_info)); } -bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) +int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - int i; + int work_done; /* avoid accessing cq (dma coherent memory) if not needed */ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return false; + return 0; - for (i = 0; i < budget; i++) { + for (work_done = 0; work_done < budget; work_done++) { struct mlx5e_rx_wqe *wqe; struct mlx5_cqe64 *cqe; struct sk_buff *skb; @@ -269,10 +279,8 @@ wq_ll_pop: /* ensure cq space is freed before enabling more cqes */ wmb(); - if (i == budget) { + if (work_done == budget) set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return true; - } - return false; + return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1341b1d3c421..2c3fba0fff54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -92,11 +92,11 @@ static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i) return &sq->dma_fifo[i & sq->dma_fifo_mask]; } -static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma) { int i; - for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + for (i = 0; i < num_dma; i++) { struct mlx5e_sq_dma *last_pushed_dma = mlx5e_dma_get(sq, --sq->dma_fifo_pc); @@ -139,19 +139,28 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, return MLX5E_MIN_INLINE; } -static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) +static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, + unsigned int *skb_len, + unsigned int len) +{ + *skb_len -= len; + *skb_data += len; +} + +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, + unsigned char **skb_data, + unsigned int *skb_len) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - skb_copy_from_linear_data(skb, vhdr, cpy1_sz); - skb_pull_inline(skb, cpy1_sz); + memcpy(vhdr, *skb_data, cpy1_sz); + mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto, - cpy2_sz); - skb_pull_inline(skb, cpy2_sz); + memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz); + mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); } static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) @@ -160,11 +169,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) u16 pi = sq->pc & wq->sz_m1; struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi]; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + unsigned char *skb_data = skb->data; + unsigned int skb_len = skb->len; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; bool bf = false; @@ 
-192,8 +204,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) opcode = MLX5_OPCODE_LSO; ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); payload_len = skb->len - ihs; - MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + - (skb_shinfo(skb)->gso_segs - 1) * ihs; + wi->num_bytes = skb->len + + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { @@ -201,16 +213,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); - MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, - ETH_ZLEN); + wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs); + mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, + &skb_len); ihs += VLAN_HLEN; } else { - skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); - skb_pull_inline(skb, ihs); + memcpy(eseg->inline_hdr_start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); } eseg->inline_hdr_sz = cpu_to_be16(ihs); @@ -220,11 +232,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) MLX5_SEND_WQE_DS); dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; - MLX5E_TX_SKB_CB(skb)->num_dma = 0; + wi->num_dma = 0; - headlen = skb_headlen(skb); + headlen = skb_len - skb->data_len; if (headlen) { - dma_addr = dma_map_single(sq->pdev, skb->data, headlen, + dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) goto dma_unmap_wqe_err; @@ -234,7 +246,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->byte_count = cpu_to_be32(headlen); mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE); - MLX5E_TX_SKB_CB(skb)->num_dma++; + wi->num_dma++; dseg++; } @@ -253,23 +265,25 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - MLX5E_TX_SKB_CB(skb)->num_dma++; + wi->num_dma++; dseg++; } - ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; + ds_cnt += wi->num_dma; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); sq->skb[pi] = skb; - MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, - MLX5_SEND_WQEBB_NUM_DS); - sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + sq->pc += wi->num_wqebbs; + + netdev_tx_sent_queue(sq->txq, wi->num_bytes); - netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); @@ -280,7 +294,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) int bf_sz = 0; if (bf && sq->uar_bf_map) - bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3; + bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; mlx5e_tx_notify_hw(sq, wqe, bf_sz); @@ -297,7 +311,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dma_unmap_wqe_err: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, skb); + mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); dev_kfree_skb_any(skb); @@ -352,6 +366,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) 
wqe_counter = be16_to_cpu(cqe->wqe_counter); do { + struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; u16 ci; int j; @@ -360,6 +375,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) ci = sqcc & sq->wq.sz_m1; skb = sq->skb[ci]; + wi = &sq->wqe_info[ci]; if (unlikely(!skb)) { /* nop */ sq->stats.nop++; @@ -367,7 +383,16 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) continue; } - for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP)) { + struct skb_shared_hwtstamps hwts = {}; + + mlx5e_fill_hwstamp(sq->tstamp, + get_cqe_ts(cqe), &hwts); + skb_tstamp_tx(skb, &hwts); + } + + for (j = 0; j < wi->num_dma; j++) { struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, dma_fifo_cc++); @@ -375,8 +400,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) } npkts++; - nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes; - sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; dev_kfree_skb(skb); } while (!last_wqe); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 2c7cb6755d1d..4ac8d716dbdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -54,6 +54,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); bool busy = false; + int work_done; int i; clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); @@ -61,26 +62,26 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); - busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); - + work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); + busy |= work_done == budget; busy |= mlx5e_post_rx_wqes(&c->rq); if (busy) return budget; - napi_complete(napi); + napi_complete_done(napi, work_done); /* avoid losing completion event during/after polling cqs */ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { napi_schedule(napi); - return 0; + return work_done; } for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); mlx5e_cq_arm(&c->rq.cq); - return 0; + return work_done; } void mlx5e_completion_event(struct mlx5_core_cq *mcq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 713ead583347..23c244a7e5d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -35,6 +35,9 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -287,6 +290,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) break; #endif +#ifdef CONFIG_MLX5_CORE_EN + case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: + mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); + break; +#endif default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -459,6 +467,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, pg)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && + MLX5_CAP_GEN(dev, vport_group_manager) && + mlx5_core_is_pf(dev)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c new file mode 100644 index 000000000000..bc3d9f8a75c1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -0,0 +1,1097 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "eswitch.h" + +#define UPLINK_VPORT 0xFFFF + +#define MLX5_DEBUG_ESWITCH_MASK BIT(3) + +#define esw_info(dev, format, ...) \ + pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_warn(dev, format, ...) \ + pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_debug(dev, format, ...) 
\ + mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) + +enum { + MLX5_ACTION_NONE = 0, + MLX5_ACTION_ADD = 1, + MLX5_ACTION_DEL = 2, +}; + +/* E-Switch UC L2 table hash node */ +struct esw_uc_addr { + struct l2addr_node node; + u32 table_index; + u32 vport; +}; + +/* E-Switch MC FDB table hash node */ +struct esw_mc_addr { /* SRIOV only */ + struct l2addr_node node; + struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */ + u32 refcnt; +}; + +/* Vport UC/MC hash node */ +struct vport_addr { + struct l2addr_node node; + u8 action; + u32 vport; + struct mlx5_flow_rule *flow_rule; /* SRIOV only */ +}; + +enum { + UC_ADDR_CHANGE = BIT(0), + MC_ADDR_CHANGE = BIT(1), +}; + +/* Vport context events */ +#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ + MC_ADDR_CHANGE) + +static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, + u32 events_mask) +{ + int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]; + int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int err; + + memset(out, 0, sizeof(out)); + memset(in, 0, sizeof(in)); + + MLX5_SET(modify_nic_vport_context_in, in, + opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); + + if (events_mask & UC_ADDR_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_uc_address_change, 1); + if (events_mask & MC_ADDR_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_mc_address_change, 1); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + goto ex; + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto ex; + return 0; +ex: + return err; +} + +/* E-Switch vport context HW commands */ +static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); + + MLX5_SET(query_esw_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_esw_vport_context_in, in, other_vport, 1); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); +} + +static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, + u16 *vlan, u8 *qos) +{ + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)]; + int err; + bool cvlan_strip; + bool cvlan_insert; + + memset(out, 0, sizeof(out)); + + *vlan = 0; + *qos = 0; + + if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || + !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) + return -ENOTSUPP; + + err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out)); + if (err) + goto out; + + cvlan_strip = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.vport_cvlan_strip); + + cvlan_insert = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.vport_cvlan_insert); + + if (cvlan_strip || cvlan_insert) { + *vlan = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.cvlan_id); + *qos = MLX5_GET(query_esw_vport_context_out, out, + esw_vport_context.cvlan_pcp); + } + + esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n", + vport, *vlan, *qos); +out: + return err; +} + +static int 
modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, + void *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)]; + + memset(out, 0, sizeof(out)); + + MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + + MLX5_SET(modify_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); + + return mlx5_cmd_exec_check_status(dev, in, inlen, + out, sizeof(out)); +} + +static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, + u16 vlan, u8 qos, bool set) +{ + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || + !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) + return -ENOTSUPP; + + esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n", + vport, vlan, qos, set); + + if (set) { + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_strip, 1); + /* insert only if no vlan in packet */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, 1); + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.cvlan_pcp, qos); + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.cvlan_id, vlan); + } + + MLX5_SET(modify_esw_vport_context_in, in, + field_select.vport_cvlan_strip, 1); + MLX5_SET(modify_esw_vport_context_in, in, + field_select.vport_cvlan_insert, 1); + + return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); +} + +/* HW L2 Table (MPFS) management */ +static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, + u8 *mac, u8 vlan_valid, u16 vlan) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)]; + u8 *in_mac_addr; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_l2_table_entry_in, in, opcode, + MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, table_index, index); + MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid); + MLX5_SET(set_l2_table_entry_in, in, vlan, vlan); + + in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); + ether_addr_copy(&in_mac_addr[2], mac); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) +{ + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(delete_l2_table_entry_in, in, opcode, + MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); + MLX5_SET(delete_l2_table_entry_in, in, table_index, index); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) +{ + int err = 0; + + *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size); + if (*ix >= l2_table->size) + err = -ENOSPC; + else + __set_bit(*ix, l2_table->bitmap); + + return err; +} + +static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix) +{ + __clear_bit(ix, l2_table->bitmap); +} + +static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac, + u8 vlan_valid, u16 vlan, + u32 *index) +{ + struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; + int err; + + err = alloc_l2_table_index(l2_table, index); + if (err) + return err; + + err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan); + if (err) + free_l2_table_index(l2_table, 
*index); + + return err; +} + +static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) +{ + struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; + + del_l2_table_entry_cmd(dev, index); + free_l2_table_index(l2_table, index); +} + +/* E-Switch FDB */ +static struct mlx5_flow_rule * +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +{ + int match_header = MLX5_MATCH_OUTER_HEADERS; + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule = NULL; + u32 *match_v; + u32 *match_c; + u8 *dmac_v; + u8 *dmac_c; + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + pr_warn("FDB: Failed to alloc match parameters\n"); + goto out; + } + dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers.dmac_47_16); + dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers.dmac_47_16); + + ether_addr_copy(dmac_v, mac); + /* Match criteria mask */ + memset(dmac_c, 0xff, 6); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = vport; + + esw_debug(esw->dev, + "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", + dmac_v, dmac_c, vport); + flow_rule = + mlx5_add_flow_rule(esw->fdb_table.fdb, + match_header, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR_OR_NULL(flow_rule)) { + pr_warn( + "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", + dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); + flow_rule = NULL; + } +out: + kfree(match_v); + kfree(match_c); + return flow_rule; +} + +static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + struct mlx5_flow_group *g; + void *match_criteria; + int table_size; + u32 *flow_group_in; + u8 *dmac; + int err = 0; + + esw_debug(dev, "Create FDB log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -ENOMEM; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + memset(flow_group_in, 0, inlen); + + table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + fdb = mlx5_create_flow_table(root_ns, 0, table_size); + if (IS_ERR_OR_NULL(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create FDB Table err %d\n", err); + goto out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + eth_broadcast_addr(dmac); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create flow group err(%d)\n", err); + goto out; + } + + esw->fdb_table.addr_grp = g; + esw->fdb_table.fdb = fdb; +out: + kfree(flow_group_in); + if (err && !IS_ERR_OR_NULL(fdb)) + mlx5_destroy_flow_table(fdb); + return err; +} + +static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.fdb) + 
return; + + esw_debug(esw->dev, "Destroy FDB Table\n"); + mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw->fdb_table.fdb = NULL; + esw->fdb_table.addr_grp = NULL; +} + +/* E-Switch vport UC/MC lists management */ +typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, + struct vport_addr *vaddr); + +static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->l2_table.l2_hash; + struct esw_uc_addr *esw_uc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + int err; + + esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); + if (esw_uc) { + esw_warn(esw->dev, + "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n", + mac, vport, esw_uc->vport); + return -EEXIST; + } + + esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL); + if (!esw_uc) + return -ENOMEM; + esw_uc->vport = vport; + + err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index); + if (err) + goto abort; + + if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */ + vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); + + esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", + vport, mac, esw_uc->table_index, vaddr->flow_rule); + return err; +abort: + l2addr_hash_del(esw_uc); + return err; +} + +static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->l2_table.l2_hash; + struct esw_uc_addr *esw_uc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); + if (!esw_uc || esw_uc->vport != vport) { + esw_debug(esw->dev, + "MAC(%pM) doesn't belong to vport (%d)\n", + mac, vport); + return -EINVAL; + } + esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n", + vport, mac, esw_uc->table_index, vaddr->flow_rule); + + del_l2_table_entry(esw->dev, esw_uc->table_index); + + if (vaddr->flow_rule) + mlx5_del_flow_rule(vaddr->flow_rule); + vaddr->flow_rule = NULL; + + l2addr_hash_del(esw_uc); + return 0; +} + +static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->mc_table; + struct esw_mc_addr *esw_mc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + if (!esw->fdb_table.fdb) + return 0; + + esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); + if (esw_mc) + goto add; + + esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL); + if (!esw_mc) + return -ENOMEM; + + esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ + esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); +add: + esw_mc->refcnt++; + /* Forward MC MAC to vport */ + vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); + esw_debug(esw->dev, + "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", + vport, mac, vaddr->flow_rule, + esw_mc->refcnt, esw_mc->uplink_rule); + return 0; +} + +static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) +{ + struct hlist_head *hash = esw->mc_table; + struct esw_mc_addr *esw_mc; + u8 *mac = vaddr->node.addr; + u32 vport = vaddr->vport; + + if (!esw->fdb_table.fdb) + return 0; + + esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); + if (!esw_mc) { + esw_warn(esw->dev, + "Failed to find eswitch MC addr for MAC(%pM) vport(%d)", + mac, vport); + return -EINVAL; + } + esw_debug(esw->dev, + "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n", + vport, mac, 
vaddr->flow_rule, esw_mc->refcnt, + esw_mc->uplink_rule); + + if (vaddr->flow_rule) + mlx5_del_flow_rule(vaddr->flow_rule); + vaddr->flow_rule = NULL; + + if (--esw_mc->refcnt) + return 0; + + if (esw_mc->uplink_rule) + mlx5_del_flow_rule(esw_mc->uplink_rule); + + l2addr_hash_del(esw_mc); + return 0; +} + +/* Apply vport UC/MC list to HW l2 table and FDB table */ +static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, + u32 vport_num, int list_type) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; + vport_addr_action vport_addr_add; + vport_addr_action vport_addr_del; + struct vport_addr *addr; + struct l2addr_node *node; + struct hlist_head *hash; + struct hlist_node *tmp; + int hi; + + vport_addr_add = is_uc ? esw_add_uc_addr : + esw_add_mc_addr; + vport_addr_del = is_uc ? esw_del_uc_addr : + esw_del_mc_addr; + + hash = is_uc ? vport->uc_list : vport->mc_list; + for_each_l2hash_node(node, tmp, hash, hi) { + addr = container_of(node, struct vport_addr, node); + switch (addr->action) { + case MLX5_ACTION_ADD: + vport_addr_add(esw, addr); + addr->action = MLX5_ACTION_NONE; + break; + case MLX5_ACTION_DEL: + vport_addr_del(esw, addr); + l2addr_hash_del(addr); + break; + } + } +} + +/* Sync vport UC/MC list from vport context */ +static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, + u32 vport_num, int list_type) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; + u8 (*mac_list)[ETH_ALEN]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_head *hash; + struct hlist_node *tmp; + int size; + int err; + int hi; + int i; + + size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : + MLX5_MAX_MC_PER_VPORT(esw->dev); + + mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); + if (!mac_list) + return; + + hash = is_uc ? vport->uc_list : vport->mc_list; + + for_each_l2hash_node(node, tmp, hash, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + + err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, + mac_list, &size); + if (err) + return; + esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", + vport_num, is_uc ? 
"UC" : "MC", size); + + for (i = 0; i < size; i++) { + if (is_uc && !is_valid_ether_addr(mac_list[i])) + continue; + + if (!is_uc && !is_multicast_ether_addr(mac_list[i])) + continue; + + addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); + if (addr) { + addr->action = MLX5_ACTION_NONE; + continue; + } + + addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, + GFP_KERNEL); + if (!addr) { + esw_warn(esw->dev, + "Failed to add MAC(%pM) to vport[%d] DB\n", + mac_list[i], vport_num); + continue; + } + addr->vport = vport_num; + addr->action = MLX5_ACTION_ADD; + } + kfree(mac_list); +} + +static void esw_vport_change_handler(struct work_struct *work) +{ + struct mlx5_vport *vport = + container_of(work, struct mlx5_vport, vport_change_handler); + struct mlx5_core_dev *dev = vport->dev; + struct mlx5_eswitch *esw = dev->priv.eswitch; + u8 mac[ETH_ALEN]; + + mlx5_query_nic_vport_mac_address(dev, vport->vport, mac); + esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", + vport->vport, mac); + + if (vport->enabled_events & UC_ADDR_CHANGE) { + esw_update_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_UC); + esw_apply_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_UC); + } + + if (vport->enabled_events & MC_ADDR_CHANGE) { + esw_update_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_MC); + esw_apply_vport_addr_list(esw, vport->vport, + MLX5_NVPRT_LIST_TYPE_MC); + } + + esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); + if (vport->enabled) + arm_vport_context_events_cmd(dev, vport->vport, + vport->enabled_events); +} + +static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, + int enable_events) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + unsigned long flags; + + WARN_ON(vport->enabled); + + esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + MLX5_ESW_VPORT_ADMIN_STATE_AUTO); + + /* Sync with current vport context */ + vport->enabled_events = enable_events; + esw_vport_change_handler(&vport->vport_change_handler); + + spin_lock_irqsave(&vport->lock, flags); + vport->enabled = true; + spin_unlock_irqrestore(&vport->lock, flags); + + arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); + + esw->enabled_vports++; + esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); +} + +static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_node *tmp; + int hi; + + for_each_l2hash_node(node, tmp, vport->uc_list, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC); + + for_each_l2hash_node(node, tmp, vport->mc_list, hi) { + addr = container_of(node, struct vport_addr, node); + addr->action = MLX5_ACTION_DEL; + } + esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC); +} + +static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + unsigned long flags; + + if (!vport->enabled) + return; + + esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); + /* Mark this vport as disabled to discard new events */ + spin_lock_irqsave(&vport->lock, flags); + vport->enabled = false; + vport->enabled_events = 0; + spin_unlock_irqrestore(&vport->lock, flags); + + 
mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + /* Wait for current already scheduled events to complete */ + flush_workqueue(esw->work_queue); + /* Disable events from this vport */ + arm_vport_context_events_cmd(esw->dev, vport->vport, 0); + /* We don't assume VFs will cleanup after themselves */ + esw_cleanup_vport(esw, vport_num); + esw->enabled_vports--; +} + +/* Public E-Switch API */ +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) +{ + int err; + int i; + + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return 0; + + if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { + esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); + return -ENOTSUPP; + } + + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); + + esw_disable_vport(esw, 0); + + err = esw_create_fdb_table(esw, nvfs + 1); + if (err) + goto abort; + + for (i = 0; i <= nvfs; i++) + esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); + + esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", + esw->enabled_vports); + return 0; + +abort: + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + return err; +} + +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) +{ + int i; + + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", + esw->enabled_vports); + + for (i = 0; i < esw->total_vports; i++) + esw_disable_vport(esw, i); + + esw_destroy_fdb_table(esw); + + /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); +} + +int mlx5_eswitch_init(struct mlx5_core_dev *dev) +{ + int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); + int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev); + struct mlx5_eswitch *esw; + int vport_num; + int err; + + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return 0; + + esw_info(dev, + "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", + total_vports, l2_table_size, + MLX5_MAX_UC_PER_VPORT(dev), + MLX5_MAX_MC_PER_VPORT(dev)); + + esw = kzalloc(sizeof(*esw), GFP_KERNEL); + if (!esw) + return -ENOMEM; + + esw->dev = dev; + + esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), + sizeof(uintptr_t), GFP_KERNEL); + if (!esw->l2_table.bitmap) { + err = -ENOMEM; + goto abort; + } + esw->l2_table.size = l2_table_size; + + esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); + if (!esw->work_queue) { + err = -ENOMEM; + goto abort; + } + + esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), + GFP_KERNEL); + if (!esw->vports) { + err = -ENOMEM; + goto abort; + } + + for (vport_num = 0; vport_num < total_vports; vport_num++) { + struct mlx5_vport *vport = &esw->vports[vport_num]; + + vport->vport = vport_num; + vport->dev = dev; + INIT_WORK(&vport->vport_change_handler, + esw_vport_change_handler); + spin_lock_init(&vport->lock); + } + + esw->total_vports = total_vports; + esw->enabled_vports = 0; + + dev->priv.eswitch = esw; + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + /* VF Vports will be enabled when SRIOV is enabled */ + return 0; +abort: + if (esw->work_queue) + destroy_workqueue(esw->work_queue); + kfree(esw->l2_table.bitmap); + 
kfree(esw->vports); + kfree(esw); + return err; +} + +void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) +{ + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_info(esw->dev, "cleanup\n"); + esw_disable_vport(esw, 0); + + esw->dev->priv.eswitch = NULL; + destroy_workqueue(esw->work_queue); + kfree(esw->l2_table.bitmap); + kfree(esw->vports); + kfree(esw); +} + +void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) +{ + struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change; + u16 vport_num = be16_to_cpu(vc_eqe->vport_num); + struct mlx5_vport *vport; + + if (!esw) { + pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", + vport_num); + return; + } + + vport = &esw->vports[vport_num]; + spin_lock(&vport->lock); + if (vport->enabled) + queue_work(esw->work_queue, &vport->vport_change_handler); + spin_unlock(&vport->lock); +} + +/* Vport Administration */ +#define ESW_ALLOWED(esw) \ + (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) +#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) + +int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + int vport, u8 mac[ETH_ALEN]) +{ + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); + if (err) { + mlx5_core_warn(esw->dev, + "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); + return err; + } + + return err; +} + +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, + int vport, int link_state) +{ + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + return mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport, link_state); +} + +int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, + int vport, struct ifla_vf_info *ivi) +{ + u16 vlan; + u8 qos; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vport - 1; + + mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac); + ivi->linkstate = mlx5_query_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport); + query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); + ivi->vlan = vlan; + ivi->qos = qos; + ivi->spoofchk = 0; + + return 0; +} + +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + int vport, u16 vlan, u8 qos) +{ + int set = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan || qos) + set = 1; + + return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); +} + +int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, + int vport, + struct ifla_vf_stats *vf_stats) +{ + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + int err = 0; + u32 *out; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vport_counter_in, in, 
other_vport, 1); + + memset(out, 0, outlen); + err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen); + if (err) + goto free_out; + + #define MLX5_GET_CTR(p, x) \ + MLX5_GET64(query_vport_counter_out, p, x) + + memset(vf_stats, 0, sizeof(*vf_stats)); + vf_stats->rx_packets = + MLX5_GET_CTR(out, received_eth_unicast.packets) + + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_eth_broadcast.packets); + + vf_stats->rx_bytes = + MLX5_GET_CTR(out, received_eth_unicast.octets) + + MLX5_GET_CTR(out, received_eth_multicast.octets) + + MLX5_GET_CTR(out, received_eth_broadcast.octets); + + vf_stats->tx_packets = + MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + + MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); + + vf_stats->tx_bytes = + MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + + MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + + vf_stats->multicast = + MLX5_GET_CTR(out, received_eth_multicast.packets); + + vf_stats->broadcast = + MLX5_GET_CTR(out, received_eth_broadcast.packets); + +free_out: + kvfree(out); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h new file mode 100644 index 000000000000..3416a428f70f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_ESWITCH_H__ +#define __MLX5_ESWITCH_H__ + +#include <linux/if_ether.h> +#include <linux/if_link.h> +#include <linux/mlx5/device.h> + +#define MLX5_MAX_UC_PER_VPORT(dev) \ + (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) + +#define MLX5_MAX_MC_PER_VPORT(dev) \ + (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) + +#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) +#define MLX5_L2_ADDR_HASH(addr) (addr[5]) + +/* L2 -mac address based- hash helpers */ +struct l2addr_node { + struct hlist_node hlist; + u8 addr[ETH_ALEN]; +}; + +#define for_each_l2hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +#define l2addr_hash_find(hash, mac, type) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + bool found = false; \ + type *ptr = NULL; \ + \ + hlist_for_each_entry(ptr, &hash[ix], node.hlist) \ + if (ether_addr_equal(ptr->node.addr, mac)) {\ + found = true; \ + break; \ + } \ + if (!found) \ + ptr = NULL; \ + ptr; \ +}) + +#define l2addr_hash_add(hash, mac, type, gfp) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + type *ptr = NULL; \ + \ + ptr = kzalloc(sizeof(type), gfp); \ + if (ptr) { \ + ether_addr_copy(ptr->node.addr, mac); \ + hlist_add_head(&ptr->node.hlist, &hash[ix]);\ + } \ + ptr; \ +}) + +#define l2addr_hash_del(ptr) ({ \ + hlist_del(&ptr->node.hlist); \ + kfree(ptr); \ +}) + +struct mlx5_vport { + struct mlx5_core_dev *dev; + int vport; + struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct work_struct vport_change_handler; + + /* This spinlock protects access to vport data, between + * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event" + * once vport marked as disabled new interrupts are discarded. 
+ */ + spinlock_t lock; /* vport events sync */ + bool enabled; + u16 enabled_events; +}; + +struct mlx5_l2_table { + struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE]; + u32 size; + unsigned long *bitmap; +}; + +struct mlx5_eswitch_fdb { + void *fdb; + struct mlx5_flow_group *addr_grp; +}; + +struct mlx5_eswitch { + struct mlx5_core_dev *dev; + struct mlx5_l2_table l2_table; + struct mlx5_eswitch_fdb fdb_table; + struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; + struct workqueue_struct *work_queue; + struct mlx5_vport *vports; + int total_vports; + int enabled_vports; +}; + +/* E-Switch API */ +int mlx5_eswitch_init(struct mlx5_core_dev *dev); +void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); +void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs); +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); +int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + int vport, u8 mac[ETH_ALEN]); +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, + int vport, int link_state); +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + int vport, u16 vlan, u8 qos); +int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, + int vport, struct ifla_vf_info *ivi); +int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, + int vport, + struct ifla_vf_stats *vf_stats); + +#endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c deleted file mode 100644 index ca90b9bc3b95..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <linux/export.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/flow_table.h> -#include "mlx5_core.h" - -struct mlx5_ftg { - struct mlx5_flow_table_group g; - u32 id; - u32 start_ix; -}; - -struct mlx5_flow_table { - struct mlx5_core_dev *dev; - u8 level; - u8 type; - u32 id; - struct mutex mutex; /* sync bitmap alloc */ - u16 num_groups; - struct mlx5_ftg *group; - unsigned long *bitmap; - u32 size; -}; - -static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix, - u32 flow_index, void *flow_context) -{ - u32 out[MLX5_ST_SZ_DW(set_fte_out)]; - u32 *in; - void *in_flow_context; - int fcdls = - MLX5_GET(flow_context, flow_context, destination_list_size) * - MLX5_ST_SZ_BYTES(dest_format_struct); - int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); - return -ENOMEM; - } - - MLX5_SET(set_fte_in, in, table_type, ft->type); - MLX5_SET(set_fte_in, in, table_id, ft->id); - MLX5_SET(set_fte_in, in, flow_index, flow_index); - MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); - - in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); - memcpy(in_flow_context, flow_context, - MLX5_ST_SZ_BYTES(flow_context) + fcdls); - - MLX5_SET(flow_context, in_flow_context, group_id, - ft->group[group_ix].id); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, - sizeof(out)); - kvfree(in); - - return err; -} - -static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index) -{ - u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; - u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v) - MLX5_SET_DFTEI(in, table_type, ft->type); - MLX5_SET_DFTEI(in, table_id, ft->id); - MLX5_SET_DFTEI(in, flow_index, flow_index); - MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); - - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i) -{ - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v) - MLX5_SET_DFGI(in, table_type, ft->type); - MLX5_SET_DFGI(in, table_id, ft->id); - MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); - MLX5_SET_DFGI(in, group_id, ft->group[i].id); - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i) -{ - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; - u32 *in; - void *in_match_criteria; - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table_group *g = &ft->group[i].g; - u32 start_ix = ft->group[i].start_ix; - u32 end_ix = start_ix + (1 << g->log_sz) - 1; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); - return -ENOMEM; - } - in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria); - - memset(out, 0, sizeof(out)); - -#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v) - MLX5_SET_CFGI(in, table_type, ft->type); - MLX5_SET_CFGI(in, table_id, ft->id); - MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); - MLX5_SET_CFGI(in, start_flow_index, start_ix); - 
MLX5_SET_CFGI(in, end_flow_index, end_ix); - MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable); - - memcpy(in_match_criteria, g->match_criteria, - MLX5_ST_SZ_BYTES(fte_match_param)); - - err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, - sizeof(out)); - if (!err) - ft->group[i].id = MLX5_GET(create_flow_group_out, out, - group_id); - - kvfree(in); - - return err; -} - -static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft) -{ - int i; - - for (i = 0; i < ft->num_groups; i++) - mlx5_destroy_flow_group_cmd(ft, i); -} - -static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft) -{ - int err; - int i; - - for (i = 0; i < ft->num_groups; i++) { - err = mlx5_create_flow_group_cmd(ft, i); - if (err) - goto err_destroy_flow_table_groups; - } - - return 0; - -err_destroy_flow_table_groups: - for (i--; i >= 0; i--) - mlx5_destroy_flow_group_cmd(ft, i); - - return err; -} - -static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft) -{ - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; - int err; - - memset(in, 0, sizeof(in)); - - MLX5_SET(create_flow_table_in, in, table_type, ft->type); - MLX5_SET(create_flow_table_in, in, level, ft->level); - MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size)); - - MLX5_SET(create_flow_table_in, in, opcode, - MLX5_CMD_OP_CREATE_FLOW_TABLE); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, - sizeof(out)); - if (err) - return err; - - ft->id = MLX5_GET(create_flow_table_out, out, table_id); - - return 0; -} - -static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft) -{ - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - -#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v) - MLX5_SET_DFTI(in, table_type, ft->type); - MLX5_SET_DFTI(in, table_id, ft->id); - MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); - - mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable, - u32 *match_criteria, int *group_ix) -{ - void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria, - outer_headers); - void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria, - misc_parameters); - void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria, - inner_headers); - int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); - int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc); - int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); - int i; - - for (i = 0; i < ft->num_groups; i++) { - struct mlx5_flow_table_group *g = &ft->group[i].g; - void *gmc_outer = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - outer_headers); - void *gmc_misc = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - misc_parameters); - void *gmc_inner = MLX5_ADDR_OF(fte_match_param, - g->match_criteria, - inner_headers); - - if (g->match_criteria_enable != match_criteria_enable) - continue; - - if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) - if (memcmp(mc_outer, gmc_outer, mc_outer_sz)) - continue; - - if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS) - if (memcmp(mc_misc, gmc_misc, mc_misc_sz)) - continue; - - if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS) - if (memcmp(mc_inner, gmc_inner, mc_inner_sz)) - continue; - - *group_ix = i; - return 0; - } - - return 
-EINVAL; -} - -static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix) -{ - struct mlx5_ftg *g = &ft->group[group_ix]; - int err = 0; - - mutex_lock(&ft->mutex); - - *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix); - if (*ix >= (g->start_ix + (1 << g->g.log_sz))) - err = -ENOSPC; - else - __set_bit(*ix, ft->bitmap); - - mutex_unlock(&ft->mutex); - - return err; -} - -static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix) -{ - __clear_bit(ix, ft->bitmap); -} - -int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, - void *match_criteria, void *flow_context, - u32 *flow_index) -{ - struct mlx5_flow_table *ft = flow_table; - int group_ix; - int err; - - err = mlx5_find_group(ft, match_criteria_enable, match_criteria, - &group_ix); - if (err) { - mlx5_core_warn(ft->dev, "mlx5_find_group failed\n"); - return err; - } - - err = alloc_flow_index(ft, group_ix, flow_index); - if (err) { - mlx5_core_warn(ft->dev, "alloc_flow_index failed\n"); - return err; - } - - return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context); -} -EXPORT_SYMBOL(mlx5_add_flow_table_entry); - -void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index) -{ - struct mlx5_flow_table *ft = flow_table; - - mlx5_del_flow_entry_cmd(ft, flow_index); - mlx5_free_flow_index(ft, flow_index); -} -EXPORT_SYMBOL(mlx5_del_flow_table_entry); - -void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, - u16 num_groups, - struct mlx5_flow_table_group *group) -{ - struct mlx5_flow_table *ft; - u32 start_ix = 0; - u32 ft_size = 0; - void *gr; - void *bm; - int err; - int i; - - for (i = 0; i < num_groups; i++) - ft_size += (1 << group[i].log_sz); - - ft = kzalloc(sizeof(*ft), GFP_KERNEL); - gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL); - bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL); - if (!ft || !gr || !bm) - goto err_free_ft; - - ft->group = gr; - ft->bitmap = bm; - ft->num_groups = num_groups; - ft->level = level; - ft->type = table_type; - ft->size = ft_size; - ft->dev = dev; - mutex_init(&ft->mutex); - - for (i = 0; i < ft->num_groups; i++) { - memcpy(&ft->group[i].g, &group[i], sizeof(*group)); - ft->group[i].start_ix = start_ix; - start_ix += 1 << group[i].log_sz; - } - - err = mlx5_create_flow_table_cmd(ft); - if (err) - goto err_free_ft; - - err = mlx5_create_flow_table_groups(ft); - if (err) - goto err_destroy_flow_table_cmd; - - return ft; - -err_destroy_flow_table_cmd: - mlx5_destroy_flow_table_cmd(ft); - -err_free_ft: - mlx5_core_warn(dev, "failed to alloc flow table\n"); - kfree(bm); - kfree(gr); - kfree(ft); - - return NULL; -} -EXPORT_SYMBOL(mlx5_create_flow_table); - -void mlx5_destroy_flow_table(void *flow_table) -{ - struct mlx5_flow_table *ft = flow_table; - - mlx5_destroy_flow_table_groups(ft); - mlx5_destroy_flow_table_cmd(ft); - kfree(ft->bitmap); - kfree(ft->group); - kfree(ft); -} -EXPORT_SYMBOL(mlx5_destroy_flow_table); - -u32 mlx5_get_flow_table_id(void *flow_table) -{ - struct mlx5_flow_table *ft = flow_table; - - return ft->id; -} -EXPORT_SYMBOL(mlx5_get_flow_table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c new file mode 100644 index 000000000000..a9894d2e8e26 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> +#include <linux/mlx5/mlx5_ifc.h> + +#include "fs_core.h" +#include "fs_cmd.h" +#include "mlx5_core.h" + +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); + MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, unsigned int level, + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(create_flow_table_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_TABLE); + + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } + MLX5_SET(create_flow_table_in, in, table_type, type); + MLX5_SET(create_flow_table_in, in, level, level); + MLX5_SET(create_flow_table_in, in, log_size, log_size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); + + if (!err) + *table_id = MLX5_GET(create_flow_table_out, out, + table_id); + return err; +} + +int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_flow_table_in, in, opcode, + MLX5_CMD_OP_DESTROY_FLOW_TABLE); + MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); + MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int 
mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(modify_flow_table_in, in, opcode, + MLX5_CMD_OP_MODIFY_FLOW_TABLE); + MLX5_SET(modify_flow_table_in, in, table_type, ft->type); + MLX5_SET(modify_flow_table_in, in, table_id, ft->id); + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; + int err; + + memset(out, 0, sizeof(out)); + + MLX5_SET(create_flow_group_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET(create_flow_group_in, in, table_type, ft->type); + MLX5_SET(create_flow_group_in, in, table_id, ft->id); + + err = mlx5_cmd_exec_check_status(dev, in, + inlen, out, + sizeof(out)); + if (!err) + *group_id = MLX5_GET(create_flow_group_out, out, + group_id); + + return err; +} + +int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) +{ + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_flow_group_in, in, opcode, + MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); + MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); + MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, + int opmod, int modify_mask, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + + fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); + u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + struct mlx5_flow_rule *dst; + void *in_flow_context; + void *in_match_value; + void *in_dests; + u32 *in; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, op_mod, opmod); + MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask); + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, fte->index); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); + MLX5_SET(flow_context, in_flow_context, action, fte->action); + MLX5_SET(flow_context, in_flow_context, destination_list_size, + fte->dests_size); + in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, + match_value); + memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); + + in_dests = 
MLX5_ADDR_OF(flow_context, in_flow_context, destination); + list_for_each_entry(dst, &fte->node.children, node.list) { + unsigned int id; + + MLX5_SET(dest_format_struct, in_dests, destination_type, + dst->dest_attr.type); + if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + id = dst->dest_attr.ft->id; + else + id = dst->dest_attr.tir_num; + MLX5_SET(dest_format_struct, in_dests, destination_id, id); + in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + } + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, + sizeof(out)); + kvfree(in); + + return err; +} + +int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); +} + +int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte) +{ + int opmod; + int modify_mask; + int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive. + flow_modify_en); + if (!atomic_mod_cap) + return -ENOTSUPP; + opmod = 1; + modify_mask = 1 << + MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST; + + return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); +} + +int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index) +{ + u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + MLX5_SET(delete_fte_in, in, table_type, ft->type); + MLX5_SET(delete_fte_in, in, table_id, ft->id); + MLX5_SET(delete_fte_in, in, flow_index, index); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h new file mode 100644 index 000000000000..9814d4784803 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_CMD_ +#define _MLX5_FS_CMD_ + +int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, unsigned int level, + unsigned int log_size, struct mlx5_flow_table + *next_ft, unsigned int *table_id); + +int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); + +int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft); + +int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, unsigned int *group_id); + +int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id); + +int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte); + +int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned group_id, + struct fs_fte *fte); + +int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index); + +int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c new file mode 100644 index 000000000000..6f68dba8d7ed --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -0,0 +1,1514 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mutex.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" +#include "fs_core.h" +#include "fs_cmd.h" + +#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ + sizeof(struct init_tree_node)) + +#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ + ...) 
{.type = FS_TYPE_PRIO,\ + .min_ft_level = min_level_val,\ + .max_ft = max_ft_val,\ + .num_leaf_prios = num_prios_val,\ + .caps = caps_val,\ + .children = (struct init_tree_node[]) {__VA_ARGS__},\ + .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ +} + +#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ + ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ + __VA_ARGS__)\ + +#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ + .children = (struct init_tree_node[]) {__VA_ARGS__},\ + .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ +} + +#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\ + sizeof(long)) + +#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap)) + +#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ + .caps = (long[]) {__VA_ARGS__} } + +#define LEFTOVERS_MAX_FT 1 +#define LEFTOVERS_NUM_PRIOS 1 +#define BY_PASS_PRIO_MAX_FT 1 +#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_MAX_FT) + +#define KERNEL_MAX_FT 2 +#define KERNEL_NUM_PRIOS 1 +#define KENREL_MIN_LEVEL 2 + +struct node_caps { + size_t arr_sz; + long *caps; +}; +static struct init_tree_node { + enum fs_node_type type; + struct init_tree_node *children; + int ar_size; + struct node_caps caps; + int min_ft_level; + int num_leaf_prios; + int prio; + int max_ft; +} root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 3, + .children = (struct init_tree_node[]) { + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), + ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, + FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), + FS_CAP(flow_table_properties_nic_receive.modify_root), + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), + } +}; + +enum fs_i_mutex_lock_class { + FS_MUTEX_GRANDPARENT, + FS_MUTEX_PARENT, + FS_MUTEX_CHILD +}; + +static void del_rule(struct fs_node *node); +static void del_flow_table(struct fs_node *node); +static void del_flow_group(struct fs_node *node); +static void del_fte(struct fs_node *node); + +static void tree_init_node(struct fs_node *node, + unsigned int refcount, + void (*remove_func)(struct fs_node *)) +{ + atomic_set(&node->refcount, refcount); + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->children); + mutex_init(&node->lock); + node->remove_func = remove_func; +} + +static void tree_add_node(struct fs_node *node, struct fs_node *parent) +{ + if (parent) + atomic_inc(&parent->refcount); + node->parent = parent; + + /* Parent is the root */ + if (!parent) + node->root = node; + else + node->root = parent->root; +} + +static void tree_get_node(struct fs_node *node) +{ + atomic_inc(&node->refcount); +} + +static void nested_lock_ref_node(struct fs_node *node, + enum fs_i_mutex_lock_class class) +{ + if (node) { + mutex_lock_nested(&node->lock, class); + atomic_inc(&node->refcount); + } +} + +static void lock_ref_node(struct fs_node *node) +{ + if (node) { + mutex_lock(&node->lock); + atomic_inc(&node->refcount); + } +} + 
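+/* Release the node's mutex and drop the reference taken by (nested_)lock_ref_node(). */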
+static void unlock_ref_node(struct fs_node *node) +{ + if (node) { + atomic_dec(&node->refcount); + mutex_unlock(&node->lock); + } +} + +static void tree_put_node(struct fs_node *node) +{ + struct fs_node *parent_node = node->parent; + + lock_ref_node(parent_node); + if (atomic_dec_and_test(&node->refcount)) { + if (parent_node) + list_del_init(&node->list); + if (node->remove_func) + node->remove_func(node); + kfree(node); + node = NULL; + } + unlock_ref_node(parent_node); + if (!node && parent_node) + tree_put_node(parent_node); +} + +static int tree_remove_node(struct fs_node *node) +{ + if (atomic_read(&node->refcount) > 1) + return -EPERM; + tree_put_node(node); + return 0; +} + +static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, + unsigned int prio) +{ + struct fs_prio *iter_prio; + + fs_for_each_prio(iter_prio, ns) { + if (iter_prio->prio == prio) + return iter_prio; + } + + return NULL; +} + +static unsigned int find_next_free_level(struct fs_prio *prio) +{ + if (!list_empty(&prio->node.children)) { + struct mlx5_flow_table *ft; + + ft = list_last_entry(&prio->node.children, + struct mlx5_flow_table, + node.list); + return ft->level + 1; + } + return prio->start_level; +} + +static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++, mask++, val1++, val2++) + if ((*((u8 *)val1) & (*(u8 *)mask)) != + ((*(u8 *)val2) & (*(u8 *)mask))) + return false; + + return true; +} + +static bool compare_match_value(struct mlx5_flow_group_mask *mask, + void *fte_param1, void *fte_param2) +{ + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, outer_headers); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, outer_headers); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, outer_headers); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + return false; + } + + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, misc_parameters); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, misc_parameters); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, misc_parameters); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_misc))) + return false; + } + + if (mask->match_criteria_enable & + 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) { + void *fte_match1 = MLX5_ADDR_OF(fte_match_param, + fte_param1, inner_headers); + void *fte_match2 = MLX5_ADDR_OF(fte_match_param, + fte_param2, inner_headers); + void *fte_mask = MLX5_ADDR_OF(fte_match_param, + mask->match_criteria, inner_headers); + + if (!masked_memcmp(fte_mask, fte_match1, fte_match2, + MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4))) + return false; + } + return true; +} + +static bool compare_match_criteria(u8 match_criteria_enable1, + u8 match_criteria_enable2, + void *mask1, void *mask2) +{ + return match_criteria_enable1 == match_criteria_enable2 && + !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param)); +} + +static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) +{ + struct fs_node *root; + struct mlx5_flow_namespace *ns; + + root = node->root; + + if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) { + pr_warn("mlx5: flow steering node 
is not in tree or garbaged\n"); + return NULL; + } + + ns = container_of(root, struct mlx5_flow_namespace, node); + return container_of(ns, struct mlx5_flow_root_namespace, ns); +} + +static inline struct mlx5_core_dev *get_dev(struct fs_node *node) +{ + struct mlx5_flow_root_namespace *root = find_root(node); + + if (root) + return root->dev; + return NULL; +} + +static void del_flow_table(struct fs_node *node) +{ + struct mlx5_flow_table *ft; + struct mlx5_core_dev *dev; + struct fs_prio *prio; + int err; + + fs_get_obj(ft, node); + dev = get_dev(&ft->node); + + err = mlx5_cmd_destroy_flow_table(dev, ft); + if (err) + pr_warn("flow steering can't destroy ft\n"); + fs_get_obj(prio, ft->node.parent); + prio->num_ft--; +} + +static void del_rule(struct fs_node *node) +{ + struct mlx5_flow_rule *rule; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct fs_fte *fte; + u32 *match_value; + struct mlx5_core_dev *dev = get_dev(node); + int match_len = MLX5_ST_SZ_BYTES(fte_match_param); + int err; + + match_value = mlx5_vzalloc(match_len); + if (!match_value) { + pr_warn("failed to allocate inbox\n"); + return; + } + + fs_get_obj(rule, node); + fs_get_obj(fte, rule->node.parent); + fs_get_obj(fg, fte->node.parent); + memcpy(match_value, fte->val, sizeof(fte->val)); + fs_get_obj(ft, fg->node.parent); + list_del(&rule->node.list); + fte->dests_size--; + if (fte->dests_size) { + err = mlx5_cmd_update_fte(dev, ft, + fg->id, fte); + if (err) + pr_warn("%s can't del rule fg id=%d fte_index=%d\n", + __func__, fg->id, fte->index); + } + kvfree(match_value); +} + +static void del_fte(struct fs_node *node) +{ + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_core_dev *dev; + struct fs_fte *fte; + int err; + + fs_get_obj(fte, node); + fs_get_obj(fg, fte->node.parent); + fs_get_obj(ft, fg->node.parent); + + dev = get_dev(&ft->node); + err = mlx5_cmd_delete_fte(dev, ft, + fte->index); + if (err) + pr_warn("flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); + + fte->status = 0; + fg->num_ftes--; +} + +static void del_flow_group(struct fs_node *node) +{ + struct mlx5_flow_group *fg; + struct mlx5_flow_table *ft; + struct mlx5_core_dev *dev; + + fs_get_obj(fg, node); + fs_get_obj(ft, fg->node.parent); + dev = get_dev(&ft->node); + + if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + pr_warn("flow steering can't destroy fg %d of ft %d\n", + fg->id, ft->id); +} + +static struct fs_fte *alloc_fte(u8 action, + u32 flow_tag, + u32 *match_value, + unsigned int index) +{ + struct fs_fte *fte; + + fte = kzalloc(sizeof(*fte), GFP_KERNEL); + if (!fte) + return ERR_PTR(-ENOMEM); + + memcpy(fte->val, match_value, sizeof(fte->val)); + fte->node.type = FS_TYPE_FLOW_ENTRY; + fte->flow_tag = flow_tag; + fte->index = index; + fte->action = action; + + return fte; +} + +static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) +{ + struct mlx5_flow_group *fg; + void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, + create_fg_in, match_criteria); + u8 match_criteria_enable = MLX5_GET(create_flow_group_in, + create_fg_in, + match_criteria_enable); + fg = kzalloc(sizeof(*fg), GFP_KERNEL); + if (!fg) + return ERR_PTR(-ENOMEM); + + fg->mask.match_criteria_enable = match_criteria_enable; + memcpy(&fg->mask.match_criteria, match_criteria, + sizeof(fg->mask.match_criteria)); + fg->node.type = FS_TYPE_FLOW_GROUP; + fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in, + start_flow_index); + fg->max_ftes = MLX5_GET(create_flow_group_in, 
create_fg_in, + end_flow_index) - fg->start_index + 1; + return fg; +} + +static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, + enum fs_flow_table_type table_type) +{ + struct mlx5_flow_table *ft; + + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + if (!ft) + return NULL; + + ft->level = level; + ft->node.type = FS_TYPE_FLOW_TABLE; + ft->type = table_type; + ft->max_fte = max_fte; + + return ft; +} + +/* If reverse is false, then we search for the first flow table in the + * root sub-tree from start(closest from right), else we search for the + * last flow table in the root sub-tree till start(closest from left). + */ +static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, + struct list_head *start, + bool reverse) +{ +#define list_advance_entry(pos, reverse) \ + ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list)) + +#define list_for_each_advance_continue(pos, head, reverse) \ + for (pos = list_advance_entry(pos, reverse); \ + &pos->list != (head); \ + pos = list_advance_entry(pos, reverse)) + + struct fs_node *iter = list_entry(start, struct fs_node, list); + struct mlx5_flow_table *ft = NULL; + + if (!root) + return NULL; + + list_for_each_advance_continue(iter, &root->children, reverse) { + if (iter->type == FS_TYPE_FLOW_TABLE) { + fs_get_obj(ft, iter); + return ft; + } + ft = find_closest_ft_recursive(iter, &iter->children, reverse); + if (ft) + return ft; + } + + return ft; +} + +/* If reverse if false then return the first flow table in next priority of + * prio in the tree, else return the last flow table in the previous priority + * of prio in the tree. + */ +static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +{ + struct mlx5_flow_table *ft = NULL; + struct fs_node *curr_node; + struct fs_node *parent; + + parent = prio->node.parent; + curr_node = &prio->node; + while (!ft && parent) { + ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + curr_node = parent; + parent = curr_node->parent; + } + return ft; +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, false); +} + +/* Assuming all the tree is locked by mutex chain lock */ +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +{ + return find_closest_ft(prio, true); +} + +static int connect_fts_in_prio(struct mlx5_core_dev *dev, + struct fs_prio *prio, + struct mlx5_flow_table *ft) +{ + struct mlx5_flow_table *iter; + int i = 0; + int err; + + fs_for_each_ft(iter, prio) { + i++; + err = mlx5_cmd_modify_flow_table(dev, + iter, + ft); + if (err) { + mlx5_core_warn(dev, "Failed to modify flow table %d\n", + iter->id); + /* The driver is out of sync with the FW */ + if (i > 1) + WARN_ON(true); + return err; + } + } + return 0; +} + +/* Connect flow tables from previous priority of prio to ft */ +static int connect_prev_fts(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + struct mlx5_flow_table *prev_ft; + + prev_ft = find_prev_chained_ft(prio); + if (prev_ft) { + struct fs_prio *prev_prio; + + fs_get_obj(prev_prio, prev_ft->node.parent); + return connect_fts_in_prio(dev, prev_prio, ft); + } + return 0; +} + +static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio + *prio) +{ + struct mlx5_flow_root_namespace *root = find_root(&prio->node); + int min_level = INT_MAX; + int err; + + if (root->root_ft) + min_level = root->root_ft->level; 
+ + if (ft->level >= min_level) + return 0; + + err = mlx5_cmd_update_root_ft(root->dev, ft); + if (err) + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + else + root->root_ft = ft; + + return err; +} + +static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + int err = 0; + + /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ + + if (list_empty(&prio->node.children)) { + err = connect_prev_fts(dev, ft, prio); + if (err) + return err; + } + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.modify_root)) + err = update_root_ft_create(ft, prio); + return err; +} + +struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int max_fte) +{ + struct mlx5_flow_table *next_ft = NULL; + struct mlx5_flow_table *ft; + int err; + int log_table_sz; + struct mlx5_flow_root_namespace *root = + find_root(&ns->node); + struct fs_prio *fs_prio = NULL; + + if (!root) { + pr_err("mlx5: flow steering failed to find root of namespace\n"); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&root->chain_lock); + fs_prio = find_prio(ns, prio); + if (!fs_prio) { + err = -EINVAL; + goto unlock_root; + } + if (fs_prio->num_ft == fs_prio->max_ft) { + err = -ENOSPC; + goto unlock_root; + } + + ft = alloc_flow_table(find_next_free_level(fs_prio), + roundup_pow_of_two(max_fte), + root->table_type); + if (!ft) { + err = -ENOMEM; + goto unlock_root; + } + + tree_init_node(&ft->node, 1, del_flow_table); + log_table_sz = ilog2(ft->max_fte); + next_ft = find_next_chained_ft(fs_prio); + err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level, + log_table_sz, next_ft, &ft->id); + if (err) + goto free_ft; + + err = connect_flow_table(root->dev, ft, fs_prio); + if (err) + goto destroy_ft; + lock_ref_node(&fs_prio->node); + tree_add_node(&ft->node, &fs_prio->node); + list_add_tail(&ft->node.list, &fs_prio->node.children); + fs_prio->num_ft++; + unlock_ref_node(&fs_prio->node); + mutex_unlock(&root->chain_lock); + return ft; +destroy_ft: + mlx5_cmd_destroy_flow_table(root->dev, ft); +free_ft: + kfree(ft); +unlock_root: + mutex_unlock(&root->chain_lock); + return ERR_PTR(err); +} + +struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups) +{ + struct mlx5_flow_table *ft; + + if (max_num_groups > num_flow_table_entries) + return ERR_PTR(-EINVAL); + + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries); + if (IS_ERR(ft)) + return ft; + + ft->autogroup.active = true; + ft->autogroup.required_groups = max_num_groups; + + return ft; +} +EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); + +/* Flow table should be locked */ +static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft, + u32 *fg_in, + struct list_head + *prev_fg, + bool is_auto_fg) +{ + struct mlx5_flow_group *fg; + struct mlx5_core_dev *dev = get_dev(&ft->node); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + + fg = alloc_flow_group(fg_in); + if (IS_ERR(fg)) + return fg; + + err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); + if (err) { + kfree(fg); + return ERR_PTR(err); + } + + if (ft->autogroup.active) + ft->autogroup.num_groups++; + /* Add node to tree */ + tree_init_node(&fg->node, !is_auto_fg, del_flow_group); + tree_add_node(&fg->node, &ft->node); + /* Add node to group list */ + list_add(&fg->node.list, ft->node.children.prev); + + return fg; +} + 
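As a rough usage sketch only — not part of this patch — the lifecycle of the objects built above could look as follows in a consumer. mlx5_get_flow_namespace(), mlx5_add_flow_rule(), mlx5_del_flow_rule() and mlx5_destroy_flow_table() are all defined further down in this file; the MLX5_FLOW_DESTINATION_TYPE_TIR and MLX5_FLOW_CONTEXT_ACTION_FWD_DEST constants, the mlx5_flow_destination layout, and the priority/TIR numbers are assumptions about the surrounding mlx5 headers rather than anything this diff introduces:

	/* Hypothetical caller: catch-all rule steering traffic to a TIR. */
	static int example_steer_to_tir(struct mlx5_core_dev *dev, u32 tirn)
	{
		struct mlx5_flow_namespace *ns;
		struct mlx5_flow_destination dest;
		struct mlx5_flow_table *ft;
		struct mlx5_flow_rule *rule;
		u32 *match_c, *match_v;
		int err = 0;

		ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
		if (!ns)
			return -ENOENT;

		/* 1024 FTEs, carved into at most four groups on demand */
		ft = mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 4);
		if (IS_ERR(ft))
			return PTR_ERR(ft);

		match_c = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
		match_v = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
		if (!match_c || !match_v) {
			err = -ENOMEM;
			goto out;
		}

		/* Empty match criteria: the rule matches every packet. */
		dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest.tir_num = tirn;
		rule = mlx5_add_flow_rule(ft, 0, match_c, match_v,
					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					  0, &dest);
		if (IS_ERR(rule))
			err = PTR_ERR(rule);
		else
			mlx5_del_flow_rule(rule);
	out:
		kvfree(match_c);
		kvfree(match_v);
		mlx5_destroy_flow_table(ft);
		return err;
	}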
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, + u32 *fg_in) +{ + struct mlx5_flow_group *fg; + + if (ft->autogroup.active) + return ERR_PTR(-EPERM); + + lock_ref_node(&ft->node); + fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); + unlock_ref_node(&ft->node); + + return fg; +} + +static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return NULL; + + rule->node.type = FS_TYPE_FLOW_DEST; + memcpy(&rule->dest_attr, dest, sizeof(*dest)); + + return rule; +} + +/* fte should not be deleted while calling this function */ +static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, + struct mlx5_flow_group *fg, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_table *ft; + struct mlx5_flow_rule *rule; + int err; + + rule = alloc_rule(dest); + if (!rule) + return ERR_PTR(-ENOMEM); + + fs_get_obj(ft, fg->node.parent); + /* Add dest to dests list- added as first element after the head */ + tree_init_node(&rule->node, 1, del_rule); + list_add_tail(&rule->node.list, &fte->node.children); + fte->dests_size++; + if (fte->dests_size == 1) + err = mlx5_cmd_create_fte(get_dev(&ft->node), + ft, fg->id, fte); + else + err = mlx5_cmd_update_fte(get_dev(&ft->node), + ft, fg->id, fte); + if (err) + goto free_rule; + + fte->status |= FS_FTE_STATUS_EXISTING; + + return rule; + +free_rule: + list_del(&rule->node.list); + kfree(rule); + fte->dests_size--; + return ERR_PTR(err); +} + +/* Assumed fg is locked */ +static unsigned int get_free_fte_index(struct mlx5_flow_group *fg, + struct list_head **prev) +{ + struct fs_fte *fte; + unsigned int start = fg->start_index; + + if (prev) + *prev = &fg->node.children; + + /* assumed list is sorted by index */ + fs_for_each_fte(fte, fg) { + if (fte->index != start) + return start; + start++; + if (prev) + *prev = &fte->node.list; + } + + return start; +} + +/* prev is output, prev->next = new_fte */ +static struct fs_fte *create_fte(struct mlx5_flow_group *fg, + u32 *match_value, + u8 action, + u32 flow_tag, + struct list_head **prev) +{ + struct fs_fte *fte; + int index; + + index = get_free_fte_index(fg, prev); + fte = alloc_fte(action, flow_tag, match_value, index); + if (IS_ERR(fte)) + return fte; + + return fte; +} + +static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct list_head *prev = &ft->node.children; + unsigned int candidate_index = 0; + struct mlx5_flow_group *fg; + void *match_criteria_addr; + unsigned int group_size = 0; + u32 *in; + + if (!ft->autogroup.active) + return ERR_PTR(-ENOENT); + + in = mlx5_vzalloc(inlen); + if (!in) + return ERR_PTR(-ENOMEM); + + if (ft->autogroup.num_groups < ft->autogroup.required_groups) + /* We save place for flow groups in addition to max types */ + group_size = ft->max_fte / (ft->autogroup.required_groups + 1); + + /* ft->max_fte == ft->autogroup.max_types */ + if (group_size == 0) + group_size = 1; + + /* sorted by start_index */ + fs_for_each_fg(fg, ft) { + if (candidate_index + group_size > fg->start_index) + candidate_index = fg->start_index + fg->max_ftes; + else + break; + prev = &fg->node.list; + } + + if (candidate_index + group_size > ft->max_fte) { + fg = ERR_PTR(-ENOSPC); + goto out; + } + + MLX5_SET(create_flow_group_in, in, match_criteria_enable, + match_criteria_enable); + 
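+	/* Reserve [candidate_index, candidate_index + group_size - 1] for the
+	 * new group; the scan above located this gap between existing groups.
+	 */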
MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index); + MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index + + group_size - 1); + match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, + in, match_criteria); + memcpy(match_criteria_addr, match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + fg = create_flow_group_common(ft, in, prev, true); +out: + kvfree(in); + return fg; +} + +static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, + u32 *match_value, + u8 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct fs_fte *fte; + struct mlx5_flow_rule *rule; + struct mlx5_flow_table *ft; + struct list_head *prev; + + nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT); + fs_for_each_fte(fte, fg) { + nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); + if (compare_match_value(&fg->mask, match_value, &fte->val) && + action == fte->action && flow_tag == fte->flow_tag) { + rule = add_rule_fte(fte, fg, dest); + unlock_ref_node(&fte->node); + if (IS_ERR(rule)) + goto unlock_fg; + else + goto add_rule; + } + unlock_ref_node(&fte->node); + } + fs_get_obj(ft, fg->node.parent); + if (fg->num_ftes >= fg->max_ftes) { + rule = ERR_PTR(-ENOSPC); + goto unlock_fg; + } + + fte = create_fte(fg, match_value, action, flow_tag, &prev); + if (IS_ERR(fte)) { + rule = (void *)fte; + goto unlock_fg; + } + tree_init_node(&fte->node, 0, del_fte); + rule = add_rule_fte(fte, fg, dest); + if (IS_ERR(rule)) { + kfree(fte); + goto unlock_fg; + } + + fg->num_ftes++; + + tree_add_node(&fte->node, &fg->node); + list_add(&fte->node.list, prev); +add_rule: + tree_add_node(&rule->node, &fte->node); +unlock_fg: + unlock_ref_node(&fg->node); + return rule; +} + +static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u8 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_rule *rule; + struct mlx5_flow_group *g; + + g = create_autogroup(ft, match_criteria_enable, match_criteria); + if (IS_ERR(g)) + return (void *)g; + + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (IS_ERR(rule)) { + /* Remove assumes refcount > 0 and autogroup creates a group + * with a refcount = 0. 
+ */ + tree_get_node(&g->node); + tree_remove_node(&g->node); + } + return rule; +} + +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_group *g; + struct mlx5_flow_rule *rule; + + nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); + fs_for_each_fg(g, ft) + if (compare_match_criteria(g->mask.match_criteria_enable, + match_criteria_enable, + g->mask.match_criteria, + match_criteria)) { + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) + goto unlock; + } + + rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, + match_value, action, flow_tag, dest); +unlock: + unlock_ref_node(&ft->node); + return rule; +} +EXPORT_SYMBOL(mlx5_add_flow_rule); + +void mlx5_del_flow_rule(struct mlx5_flow_rule *rule) +{ + tree_remove_node(&rule->node); +} +EXPORT_SYMBOL(mlx5_del_flow_rule); + +/* Assuming prio->node.children(flow tables) is sorted by level */ +static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) +{ + struct fs_prio *prio; + + fs_get_obj(prio, ft->node.parent); + + if (!list_is_last(&ft->node.list, &prio->node.children)) + return list_next_entry(ft, node.list); + return find_next_chained_ft(prio); +} + +static int update_root_ft_destroy(struct mlx5_flow_table *ft) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct mlx5_flow_table *new_root_ft = NULL; + + if (root->root_ft != ft) + return 0; + + new_root_ft = find_next_ft(ft); + if (new_root_ft) { + int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); + + if (err) { + mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", + ft->id); + return err; + } + root->root_ft = new_root_ft; + } + return 0; +} + +/* Connect flow table from previous priority to + * the next flow table. 
+ */ +static int disconnect_flow_table(struct mlx5_flow_table *ft) +{ + struct mlx5_core_dev *dev = get_dev(&ft->node); + struct mlx5_flow_table *next_ft; + struct fs_prio *prio; + int err = 0; + + err = update_root_ft_destroy(ft); + if (err) + return err; + + fs_get_obj(prio, ft->node.parent); + if (!(list_first_entry(&prio->node.children, + struct mlx5_flow_table, + node.list) == ft)) + return 0; + + next_ft = find_next_chained_ft(prio); + err = connect_prev_fts(dev, next_ft, prio); + if (err) + mlx5_core_warn(dev, "Failed to disconnect flow table %d\n", + ft->id); + return err; +} + +int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + int err = 0; + + mutex_lock(&root->chain_lock); + err = disconnect_flow_table(ft); + if (err) { + mutex_unlock(&root->chain_lock); + return err; + } + if (tree_remove_node(&ft->node)) + mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", + ft->id); + mutex_unlock(&root->chain_lock); + + return err; +} +EXPORT_SYMBOL(mlx5_destroy_flow_table); + +void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) +{ + if (tree_remove_node(&fg->node)) + mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", + fg->id); +} + +struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type) +{ + struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + int prio; + static struct fs_prio *fs_prio; + struct mlx5_flow_namespace *ns; + + if (!root_ns) + return NULL; + + switch (type) { + case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_KERNEL: + case MLX5_FLOW_NAMESPACE_LEFTOVERS: + prio = type; + break; + case MLX5_FLOW_NAMESPACE_FDB: + if (dev->priv.fdb_root_ns) + return &dev->priv.fdb_root_ns->ns; + else + return NULL; + default: + return NULL; + } + + fs_prio = find_prio(&root_ns->ns, prio); + if (!fs_prio) + return NULL; + + ns = list_first_entry(&fs_prio->node.children, + typeof(*ns), + node.list); + + return ns; +} +EXPORT_SYMBOL(mlx5_get_flow_namespace); + +static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, + unsigned prio, int max_ft) +{ + struct fs_prio *fs_prio; + + fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL); + if (!fs_prio) + return ERR_PTR(-ENOMEM); + + fs_prio->node.type = FS_TYPE_PRIO; + tree_init_node(&fs_prio->node, 1, NULL); + tree_add_node(&fs_prio->node, &ns->node); + fs_prio->max_ft = max_ft; + fs_prio->prio = prio; + list_add_tail(&fs_prio->node.list, &ns->node.children); + + return fs_prio; +} + +static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace + *ns) +{ + ns->node.type = FS_TYPE_NAMESPACE; + + return ns; +} + +static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) +{ + struct mlx5_flow_namespace *ns; + + ns = kzalloc(sizeof(*ns), GFP_KERNEL); + if (!ns) + return ERR_PTR(-ENOMEM); + + fs_init_namespace(ns); + tree_init_node(&ns->node, 1, NULL); + tree_add_node(&ns->node, &prio->node); + list_add_tail(&ns->node.list, &prio->node.children); + + return ns; +} + +static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node + *prio_metadata) +{ + struct fs_prio *fs_prio; + int i; + + for (i = 0; i < prio_metadata->num_leaf_prios; i++) { + fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft); + if (IS_ERR(fs_prio)) + return PTR_ERR(fs_prio); + } + return 0; +} + +#define FLOW_TABLE_BIT_SZ 1 +#define GET_FLOW_TABLE_CAP(dev, offset) \ + ((be32_to_cpu(*((__be32 
*)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + offset / 32)) >> \ + (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) +static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) +{ + int i; + + for (i = 0; i < caps->arr_sz; i++) { + if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i])) + return false; + } + return true; +} + +static int init_root_tree_recursive(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, + struct fs_node *fs_parent_node, + struct init_tree_node *init_parent_node, + int index) +{ + int max_ft_level = MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive. + max_ft_level); + struct mlx5_flow_namespace *fs_ns; + struct fs_prio *fs_prio; + struct fs_node *base; + int i; + int err; + + if (init_node->type == FS_TYPE_PRIO) { + if ((init_node->min_ft_level > max_ft_level) || + !has_required_caps(dev, &init_node->caps)) + return 0; + + fs_get_obj(fs_ns, fs_parent_node); + if (init_node->num_leaf_prios) + return create_leaf_prios(fs_ns, init_node); + fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft); + if (IS_ERR(fs_prio)) + return PTR_ERR(fs_prio); + base = &fs_prio->node; + } else if (init_node->type == FS_TYPE_NAMESPACE) { + fs_get_obj(fs_prio, fs_parent_node); + fs_ns = fs_create_namespace(fs_prio); + if (IS_ERR(fs_ns)) + return PTR_ERR(fs_ns); + base = &fs_ns->node; + } else { + return -EINVAL; + } + for (i = 0; i < init_node->ar_size; i++) { + err = init_root_tree_recursive(dev, &init_node->children[i], + base, init_node, i); + if (err) + return err; + } + + return 0; +} + +static int init_root_tree(struct mlx5_core_dev *dev, + struct init_tree_node *init_node, + struct fs_node *fs_parent_node) +{ + int i; + struct mlx5_flow_namespace *fs_ns; + int err; + + fs_get_obj(fs_ns, fs_parent_node); + for (i = 0; i < init_node->ar_size; i++) { + err = init_root_tree_recursive(dev, &init_node->children[i], + &fs_ns->node, + init_node, i); + if (err) + return err; + } + return 0; +} + +static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, + enum fs_flow_table_type + table_type) +{ + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_namespace *ns; + + /* Create the root namespace */ + root_ns = mlx5_vzalloc(sizeof(*root_ns)); + if (!root_ns) + return NULL; + + root_ns->dev = dev; + root_ns->table_type = table_type; + + ns = &root_ns->ns; + fs_init_namespace(ns); + mutex_init(&root_ns->chain_lock); + tree_init_node(&ns->node, 1, NULL); + tree_add_node(&ns->node, NULL); + + return root_ns; +} + +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level); + +static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level) +{ + struct fs_prio *prio; + + fs_for_each_prio(prio, ns) { + /* This updates prio start_level and max_ft */ + set_prio_attrs_in_prio(prio, acc_level); + acc_level += prio->max_ft; + } + return acc_level; +} + +static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) +{ + struct mlx5_flow_namespace *ns; + int acc_level_ns = acc_level; + + prio->start_level = acc_level; + fs_for_each_ns(ns, prio) + /* This updates start_level and max_ft of ns's priority descendants */ + acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); + if (!prio->max_ft) + prio->max_ft = acc_level_ns - prio->start_level; + WARN_ON(prio->max_ft < acc_level_ns - prio->start_level); +} + +static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) +{ + struct mlx5_flow_namespace *ns = &root_ns->ns; + struct fs_prio *prio; + int start_level = 0; + + 
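+	/* Levels are handed out cumulatively: tables of each priority start
+	 * right after the level range of the priority before it.
+	 */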
fs_for_each_prio(prio, ns) { + set_prio_attrs_in_prio(prio, start_level); + start_level += prio->max_ft; + } +} + +static int init_root_ns(struct mlx5_core_dev *dev) +{ + + dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); + if (IS_ERR_OR_NULL(dev->priv.root_ns)) + goto cleanup; + + if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) + goto cleanup; + + set_prio_attrs(dev->priv.root_ns); + + return 0; + +cleanup: + mlx5_cleanup_fs(dev); + return -ENOMEM; +} + +static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, + struct mlx5_flow_root_namespace *root_ns) +{ + struct fs_node *prio; + + if (!root_ns) + return; + + if (!list_empty(&root_ns->ns.node.children)) { + prio = list_first_entry(&root_ns->ns.node.children, + struct fs_node, + list); + if (tree_remove_node(prio)) + mlx5_core_warn(dev, + "Flow steering priority wasn't destroyed, refcount > 1\n"); + } + if (tree_remove_node(&root_ns->ns.node)) + mlx5_core_warn(dev, + "Flow steering namespace wasn't destroyed, refcount > 1\n"); + root_ns = NULL; +} + +static void cleanup_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + struct fs_prio *iter_prio; + + if (!MLX5_CAP_GEN(dev, nic_flow_table)) + return; + + if (!root_ns) + return; + + /* stage 1 */ + fs_for_each_prio(iter_prio, &root_ns->ns) { + struct fs_node *node; + struct mlx5_flow_namespace *iter_ns; + + fs_for_each_ns_or_ft(node, iter_prio) { + if (node->type == FS_TYPE_FLOW_TABLE) + continue; + fs_get_obj(iter_ns, node); + while (!list_empty(&iter_ns->node.children)) { + struct fs_prio *obj_iter_prio2; + struct fs_node *iter_prio2 = + list_first_entry(&iter_ns->node.children, + struct fs_node, + list); + + fs_get_obj(obj_iter_prio2, iter_prio2); + if (tree_remove_node(iter_prio2)) { + mlx5_core_warn(dev, + "Priority %d wasn't destroyed, refcount > 1\n", + obj_iter_prio2->prio); + return; + } + } + } + } + + /* stage 2 */ + fs_for_each_prio(iter_prio, &root_ns->ns) { + while (!list_empty(&iter_prio->node.children)) { + struct fs_node *iter_ns = + list_first_entry(&iter_prio->node.children, + struct fs_node, + list); + if (tree_remove_node(iter_ns)) { + mlx5_core_warn(dev, + "Namespace wasn't destroyed, refcount > 1\n"); + return; + } + } + } + + /* stage 3 */ + while (!list_empty(&root_ns->ns.node.children)) { + struct fs_prio *obj_prio_node; + struct fs_node *prio_node = + list_first_entry(&root_ns->ns.node.children, + struct fs_node, + list); + + fs_get_obj(obj_prio_node, prio_node); + if (tree_remove_node(prio_node)) { + mlx5_core_warn(dev, + "Priority %d wasn't destroyed, refcount > 1\n", + obj_prio_node->prio); + return; + } + } + + if (tree_remove_node(&root_ns->ns.node)) { + mlx5_core_warn(dev, + "root namespace wasn't destroyed, refcount > 1\n"); + return; + } + + dev->priv.root_ns = NULL; +} + +void mlx5_cleanup_fs(struct mlx5_core_dev *dev) +{ + cleanup_root_ns(dev); + cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); +} + +static int init_fdb_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); + if (!dev->priv.fdb_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); + return PTR_ERR(prio); + } else { + return 0; + } +} + +int mlx5_init_fs(struct mlx5_core_dev *dev) +{ + int err = 0; + + if (MLX5_CAP_GEN(dev, nic_flow_table)) { + err = init_root_ns(dev); + if (err) + return 
err; + } + if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = init_fdb_root_ns(dev); + if (err) + cleanup_root_ns(dev); + } + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h new file mode 100644 index 000000000000..00245fd7e4bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_CORE_ +#define _MLX5_FS_CORE_ + +#include <linux/mlx5/fs.h> + +enum fs_node_type { + FS_TYPE_NAMESPACE, + FS_TYPE_PRIO, + FS_TYPE_FLOW_TABLE, + FS_TYPE_FLOW_GROUP, + FS_TYPE_FLOW_ENTRY, + FS_TYPE_FLOW_DEST +}; + +enum fs_flow_table_type { + FS_FT_NIC_RX = 0x0, + FS_FT_FDB = 0X4, +}; + +enum fs_fte_status { + FS_FTE_STATUS_EXISTING = 1UL << 0, +}; + +struct fs_node { + struct list_head list; + struct list_head children; + enum fs_node_type type; + struct fs_node *parent; + struct fs_node *root; + /* lock the node for writing and traversing */ + struct mutex lock; + atomic_t refcount; + void (*remove_func)(struct fs_node *); +}; + +struct mlx5_flow_rule { + struct fs_node node; + struct mlx5_flow_destination dest_attr; +}; + +/* Type of children is mlx5_flow_group */ +struct mlx5_flow_table { + struct fs_node node; + u32 id; + unsigned int max_fte; + unsigned int level; + enum fs_flow_table_type type; + struct { + bool active; + unsigned int required_groups; + unsigned int num_groups; + } autogroup; +}; + +/* Type of children is mlx5_flow_rule */ +struct fs_fte { + struct fs_node node; + u32 val[MLX5_ST_SZ_DW(fte_match_param)]; + u32 dests_size; + u32 flow_tag; + u32 index; + u32 action; + enum fs_fte_status status; +}; + +/* Type of children is mlx5_flow_table/namespace */ +struct fs_prio { + struct fs_node node; + unsigned int max_ft; + unsigned int start_level; + unsigned int prio; + unsigned int num_ft; +}; + +/* Type of children is fs_prio */ +struct mlx5_flow_namespace { + /* parent == NULL => root ns */ + struct fs_node node; +}; + +struct mlx5_flow_group_mask { + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; +}; + +/* Type of children is fs_fte */ +struct mlx5_flow_group { + struct fs_node node; + struct mlx5_flow_group_mask mask; + u32 start_index; + u32 max_ftes; + u32 num_ftes; + u32 id; +}; + +struct mlx5_flow_root_namespace { + struct mlx5_flow_namespace ns; + enum fs_flow_table_type table_type; + struct mlx5_core_dev *dev; + struct mlx5_flow_table *root_ft; + /* Should be held when chaining flow tables */ + struct mutex chain_lock; +}; + +int mlx5_init_fs(struct mlx5_core_dev *dev); +void mlx5_cleanup_fs(struct mlx5_core_dev *dev); + +#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } + +#define fs_list_for_each_entry(pos, root) \ + list_for_each_entry(pos, root, node.list) + +#define fs_for_each_ns_or_ft_reverse(pos, prio) \ + list_for_each_entry_reverse(pos, &(prio)->node.children, list) + +#define fs_for_each_ns_or_ft(pos, prio) \ + list_for_each_entry(pos, (&(prio)->node.children), list) + +#define fs_for_each_prio(pos, ns) \ + fs_list_for_each_entry(pos, &(ns)->node.children) + +#define fs_for_each_ns(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + +#define fs_for_each_ft(pos, prio) \ + fs_list_for_each_entry(pos, &(prio)->node.children) + +#define fs_for_each_fg(pos, ft) \ + fs_list_for_each_entry(pos, &(ft)->node.children) + +#define fs_for_each_fte(pos, fg) \ + fs_list_for_each_entry(pos, &(fg)->node.children) + +#define fs_for_each_dst(pos, fte) \ + fs_list_for_each_entry(pos, &(fte)->node.children) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 9335e5ae18cc..aa1ab4702385 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -160,6 +160,30 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (err) return err; } + + if 
(MLX5_CAP_GEN(dev, vport_group_manager) && + MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4ac8d4cc4973..67676cf0d507 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -49,6 +49,10 @@ #include <linux/delay.h> #include <linux/mlx5/mlx5_ifc.h> #include "mlx5_core.h" +#include "fs_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); @@ -454,6 +458,9 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) struct mlx5_reg_host_endianess he_out; int err; + if (!mlx5_core_is_pf(dev)) + return 0; + memset(&he_in, 0, sizeof(he_in)); he_in.he = MLX5_SET_HOST_ENDIANNESS; err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), @@ -462,42 +469,52 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) return err; } -static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) +int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { + u32 out[MLX5_ST_SZ_DW(enable_hca_out)]; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)]; int err; - struct mlx5_enable_hca_mbox_in in; - struct mlx5_enable_hca_mbox_out out; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); + memset(in, 0, sizeof(in)); + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + MLX5_SET(enable_hca_in, in, function_id, func_id); + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return 0; + return mlx5_cmd_status_to_err_v2(out); } -static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) +int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) { + u32 out[MLX5_ST_SZ_DW(disable_hca_out)]; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)]; int err; - struct mlx5_disable_hca_mbox_in in; - struct mlx5_disable_hca_mbox_out out; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + memset(in, 0, sizeof(in)); + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, func_id); + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); + return mlx5_cmd_status_to_err_v2(out); +} - return 0; +cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) +{ + u32 timer_h, timer_h1, timer_l; + + timer_h = ioread32be(&dev->iseg->internal_timer_h); + timer_l = ioread32be(&dev->iseg->internal_timer_l); + timer_h1 = ioread32be(&dev->iseg->internal_timer_h); + if (timer_h != timer_h1) /* wrap around */ + timer_l = ioread32be(&dev->iseg->internal_timer_l); + + return 
(cycle_t)timer_l | (cycle_t)timer_h1 << 32; } static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) @@ -942,7 +959,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_pagealloc_init(dev); - err = mlx5_core_enable_hca(dev); + err = mlx5_core_enable_hca(dev, 0); if (err) { dev_err(&pdev->dev, "enable hca failed\n"); goto err_pagealloc_cleanup; @@ -1052,6 +1069,25 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_init_srq_table(dev); mlx5_init_mr_table(dev); + err = mlx5_init_fs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init flow steering\n"); + goto err_fs; + } +#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_eswitch_init(dev); + if (err) { + dev_err(&pdev->dev, "eswitch init failed %d\n", err); + goto err_reg_dev; + } +#endif + + err = mlx5_sriov_init(dev); + if (err) { + dev_err(&pdev->dev, "sriov init failed %d\n", err); + goto err_sriov; + } + err = mlx5_register_device(dev); if (err) { dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); @@ -1068,7 +1104,16 @@ out: return 0; +err_sriov: + if (mlx5_sriov_cleanup(dev)) + dev_err(&dev->pdev->dev, "sriov cleanup failed\n"); + +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_cleanup(dev->priv.eswitch); +#endif err_reg_dev: + mlx5_cleanup_fs(dev); +err_fs: mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1106,7 +1151,7 @@ reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: - mlx5_core_disable_hca(dev); + mlx5_core_disable_hca(dev, 0); err_pagealloc_cleanup: mlx5_pagealloc_cleanup(dev); @@ -1123,6 +1168,13 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { int err = 0; + err = mlx5_sriov_cleanup(dev); + if (err) { + dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n", + __func__); + return err; + } + mutex_lock(&dev->intf_state_mutex); if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", @@ -1130,6 +1182,11 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } mlx5_unregister_device(dev); +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_cleanup(dev->priv.eswitch); +#endif + + mlx5_cleanup_fs(dev); mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1149,7 +1206,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) } mlx5_pagealloc_stop(dev); mlx5_reclaim_startup_pages(dev); - mlx5_core_disable_hca(dev); + mlx5_core_disable_hca(dev, 0); mlx5_pagealloc_cleanup(dev); mlx5_cmd_cleanup(dev); @@ -1195,6 +1252,7 @@ static int init_one(struct pci_dev *pdev, return -ENOMEM; } priv = &dev->priv; + priv->pci_dev_data = id->driver_data; pci_set_drvdata(pdev, dev); @@ -1365,12 +1423,12 @@ static const struct pci_error_handlers mlx5_err_handler = { }; static const struct pci_device_id mlx5_core_pci_table[] = { - { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ - { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ - { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ - { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */ - { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ - { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */ + { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ + { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ + { 
PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ { 0, } }; @@ -1381,7 +1439,8 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, - .err_handler = &mlx5_err_handler + .err_handler = &mlx5_err_handler, + .sriov_configure = mlx5_core_sriov_configure, }; static int __init init(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index cee5b7a839bc..0336847ec9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -36,6 +36,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/if_link.h> #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "3.0-1" @@ -64,6 +65,9 @@ do { \ (__dev)->priv.name, __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_info(__dev, format, ...) \ + dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) + enum { MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_TIME, /* print command execution time */ @@ -90,6 +94,11 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); +int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); +cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4d3377b12657..9eeee0545f1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -33,6 +33,7 @@ #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/delay.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -95,6 +96,7 @@ struct mlx5_manage_pages_outbox { enum { MAX_RECLAIM_TIME_MSECS = 5000, + MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60, }; enum { @@ -352,6 +354,10 @@ retry: goto out_4k; } + dev->priv.fw_pages += npages; + if (func_id) + dev->priv.vfs_pages += npages; + mlx5_core_dbg(dev, "err %d\n", err); kvfree(in); @@ -405,6 +411,12 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } num_claimed = be32_to_cpu(out->num_entries); + if (num_claimed > npages) { + mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n", + num_claimed, npages); + err = -EINVAL; + goto out_free; + } if (nclaimed) *nclaimed = num_claimed; @@ -412,6 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, addr = be64_to_cpu(out->pas[i]); free_4k(dev, addr); } + dev->priv.fw_pages -= num_claimed; + if (func_id) + dev->priv.vfs_pages -= num_claimed; out_free: kvfree(out); @@ -548,3 +563,26 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { destroy_workqueue(dev->priv.pg_wq); } + +int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + int prev_vfs_pages = dev->priv.vfs_pages; + + mlx5_core_dbg(dev, "Waiting for %d 
pages from %s\n", prev_vfs_pages, + dev->priv.name); + while (dev->priv.vfs_pages) { + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages); + return -ETIMEDOUT; + } + if (dev->priv.vfs_pages < prev_vfs_pages) { + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + prev_vfs_pages = dev->priv.vfs_pages; + } + msleep(50); + } + + mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c new file mode 100644 index 000000000000..7b24386794f9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#ifdef CONFIG_MLX5_CORE_EN +#include "eswitch.h" +#endif + +static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + int vf; + + for (vf = 1; vf <= num_vfs; vf++) { + err = mlx5_core_enable_hca(dev, vf); + if (err) { + mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1); + } else { + sriov->vfs_ctx[vf - 1].enabled = 1; + mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1); + } + } +} + +static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int vf; + + for (vf = 1; vf <= num_vfs; vf++) { + if (sriov->vfs_ctx[vf - 1].enabled) { + if (mlx5_core_disable_hca(dev, vf)) + mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1); + else + sriov->vfs_ctx[vf - 1].enabled = 0; + } + } +} + +static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + int err; + + if (pci_num_vf(pdev)) + pci_disable_sriov(pdev); + + enable_vfs(dev, num_vfs); + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "enable sriov failed %d\n", err); + goto ex; + } + + return 0; + +ex: + disable_vfs(dev, num_vfs); + return err; +} + +static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC); + if (!sriov->vfs_ctx) + return -ENOMEM; + + sriov->enabled_vfs = num_vfs; + err = mlx5_core_create_vfs(pdev, num_vfs); + if (err) { + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = NULL; + return err; + } + + return 0; +} + +static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + + sriov->num_vfs = num_vfs; +} + +static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov; + + sriov = &dev->priv.sriov; + disable_vfs(dev, sriov->num_vfs); + + if (mlx5_wait_for_vf_pages(dev)) + mlx5_core_warn(dev, "timeout claiming VFs pages\n"); + + sriov->num_vfs = 0; +} + +int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int err; + + mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs); + if (!mlx5_core_is_pf(dev)) + return -EPERM; + + mlx5_core_cleanup_vfs(dev); + + if (!num_vfs) { +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_disable_sriov(dev->priv.eswitch); +#endif + kfree(sriov->vfs_ctx); + sriov->vfs_ctx = NULL; + if (!pci_vfs_assigned(pdev)) + pci_disable_sriov(pdev); + else + pr_info("unloading PF driver while leaving orphan VFs\n"); + return 0; + } + + err = mlx5_core_sriov_enable(pdev, num_vfs); + if (err) { + dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err); + return err; + } + + mlx5_core_init_vfs(dev, num_vfs); +#ifdef CONFIG_MLX5_CORE_EN + mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs); +#endif + + return num_vfs; +} + +static int sync_required(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + int cur_vfs = pci_num_vf(pdev); + + if (cur_vfs != sriov->num_vfs) { + pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs); + return 1; + } + + 
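+	/* PCI core and the driver agree on the VF count; nothing to sync. */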
return 0; +} + +int mlx5_sriov_init(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + struct pci_dev *pdev = dev->pdev; + int cur_vfs; + + if (!mlx5_core_is_pf(dev)) + return 0; + + if (!sync_required(dev->pdev)) + return 0; + + cur_vfs = pci_num_vf(pdev); + sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); + if (!sriov->vfs_ctx) + return -ENOMEM; + + sriov->enabled_vfs = cur_vfs; + + mlx5_core_init_vfs(dev, cur_vfs); +#ifdef CONFIG_MLX5_CORE_EN + if (cur_vfs) + mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs); +#endif + + enable_vfs(dev, cur_vfs); + + return 0; +} + +int mlx5_sriov_cleanup(struct mlx5_core_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err; + + if (!mlx5_core_is_pf(dev)) + return 0; + + err = mlx5_core_sriov_configure(pdev, 0); + if (err) + return err; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b94177ebcf3a..076197efea9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -36,54 +36,399 @@ #include <linux/mlx5/vport.h> #include "mlx5_core.h" -u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) +static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; - u32 out[MLX5_ST_SZ_DW(query_vport_state_out)]; int err; + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; memset(in, 0, sizeof(in)); MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); MLX5_SET(query_vport_state_in, in, op_mod, opmod); + MLX5_SET(query_vport_state_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vport_state_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); if (err) mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); + return err; +} + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) +{ + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; + + _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); + return MLX5_GET(query_vport_state_out, out, state); } -EXPORT_SYMBOL(mlx5_query_vport_state); +EXPORT_SYMBOL_GPL(mlx5_query_vport_state); + +u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) +{ + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; + + _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); + + return MLX5_GET(query_vport_state_out, out, admin_state); +} +EXPORT_SYMBOL(mlx5_query_vport_admin_state); -void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u8 state) +{ + u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]; + u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(modify_vport_state_in, in, opcode, + MLX5_CMD_OP_MODIFY_VPORT_STATE); + MLX5_SET(modify_vport_state_in, in, op_mod, opmod); + MLX5_SET(modify_vport_state_in, in, vport_number, vport); + + if (vport) + MLX5_SET(modify_vport_state_in, in, other_vport, 1); + + MLX5_SET(modify_vport_state_in, in, admin_state, state); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); + if (err) + mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n"); + + return err; +} 
+EXPORT_SYMBOL(mlx5_modify_vport_admin_state); + +static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); +} + +static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); +} + +int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); u8 *out_addr; + int err; out = mlx5_vzalloc(outlen); if (!out) - return; + return -ENOMEM; out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, nic_vport_context.permanent_address); + err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); + if (err) + goto out; + + ether_addr_copy(addr, &out_addr[2]); + +out: + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address); + +int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr) +{ + void *in; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + int err; + void *nic_vport_ctx; + u8 *perm_mac; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(mdev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(modify_nic_vport_context_in, in, + field_select.permanent_address, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + + if (vport) + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx, + permanent_address); + + ether_addr_copy(&perm_mac[2], addr); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address); + +int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, + u32 vport, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + void *nic_vport_ctx; + int max_list_size; + int req_list_size; + int out_sz; + void *out; + int err; + int i; + + req_list_size = *list_size; + + max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ? 
+ 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); + + if (req_list_size > max_list_size) { + mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type); + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); - memset(out, 0, outlen); - mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - ether_addr_copy(addr, &out_addr[2]); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; - kvfree(out); + nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context); + req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx, + allowed_list_size); + + *list_size = req_list_size; + for (i = 0; i < req_list_size; i++) { + u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]) + 2; + ether_addr_copy(addr_list[i], mac_addr); + } +out: + kfree(out); + return err; } -EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list); + +int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int max_list_size; + int in_sz; + void *in; + int err; + int i; + + max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ? 
+ 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + list_size * MLX5_ST_SZ_BYTES(mac_address_layout); + + memset(out, 0, sizeof(out)); + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, + field_select.addresses_list, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, + nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_type, list_type); + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_size, list_size); + + for (i = 0; i < list_size; i++) { + u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]) + 2; + ether_addr_copy(curr_mac, addr_list[i]); + } + + err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list); + +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + void *nic_vport_ctx; + int req_list_size; + int max_list_size; + int out_sz; + void *out; + int err; + int i; + + req_list_size = *size; + max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); + if (req_list_size > max_list_size) { + mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + req_list_size * MLX5_ST_SZ_BYTES(vlan_layout); + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, + MLX5_NVPRT_LIST_TYPE_VLAN); + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); + + if (vport) + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; + + nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context); + req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx, + allowed_list_size); + + *size = req_list_size; + for (i = 0; i < req_list_size; i++) { + void *vlan_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]); + vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans); + +int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, + u16 vlans[], + int list_size) +{ + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + void *nic_vport_ctx; + int max_list_size; + int in_sz; + void *in; + int err; + int i; + + max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + + list_size * MLX5_ST_SZ_BYTES(vlan_layout); + + memset(out, 0, sizeof(out)); + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + MLX5_SET(modify_nic_vport_context_in, in, + field_select.addresses_list, 1); + + nic_vport_ctx = 
MLX5_ADDR_OF(modify_nic_vport_context_in, in, + nic_vport_context); + + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN); + MLX5_SET(nic_vport_context, nic_vport_ctx, + allowed_list_size, list_size); + + for (i = 0; i < list_size; i++) { + void *vlan_addr = MLX5_ADDR_OF(nic_vport_context, + nic_vport_ctx, + current_uc_mac_address[i]); + MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]); + } + + err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, @@ -343,3 +688,65 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); + +int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, + u32 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); + if (err) + goto out; + + *promisc_uc = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_uc); + *promisc_mc = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_mc); + *promisc_all = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.promisc_all); + +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc); + +int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, + int promisc_uc, + int promisc_mc, + int promisc_all) +{ + void *in; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_err(mdev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_uc, promisc_uc); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_mc, promisc_mc); + MLX5_SET(modify_nic_vport_context_in, in, + nic_vport_context.promisc_all, promisc_all); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index e36e12219c9b..ce26adcb4988 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -10,6 +10,14 @@ config MLXSW_CORE To compile this driver as a module, choose M here: the module will be called mlxsw_core. +config MLXSW_CORE_HWMON + bool "HWMON support for Mellanox Technologies Switch ASICs" + depends on MLXSW_CORE && HWMON + depends on !(MLXSW_CORE=y && HWMON=m) + default y + ---help--- + Say Y here if you want to expose HWMON interface on mlxsw devices. 
+ config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE @@ -33,7 +41,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" - depends on MLXSW_CORE && NET_SWITCHDEV + depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index af015818fd19..584cac444852 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o +mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 97f0d93caf99..22379eb8e924 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -105,6 +105,10 @@ struct mlxsw_core { struct debugfs_blob_wrapper vsd_blob; struct debugfs_blob_wrapper psid_blob; } dbg; + struct { + u8 *mapping; /* lag_id+port_index to local_port mapping */ + } lag; + struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -814,6 +818,17 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_alloc_stats; } + if (mlxsw_driver->profile->used_max_lag && + mlxsw_driver->profile->used_max_port_per_lag) { + alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag * + mlxsw_driver->profile->max_port_per_lag; + mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_core->lag.mapping) { + err = -ENOMEM; + goto err_alloc_lag_mapping; + } + } + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); if (err) goto err_bus_init; @@ -822,6 +837,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); + if (err) + goto err_hwmon_init; + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, mlxsw_bus_info); if (err) @@ -836,10 +855,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core->driver_priv); err_driver_init: +err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: mlxsw_bus->fini(bus_priv); err_bus_init: + kfree(mlxsw_core->lag.mapping); +err_alloc_lag_mapping: free_percpu(mlxsw_core->pcpu_stats); err_alloc_stats: kfree(mlxsw_core); @@ -857,6 +879,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) mlxsw_core->driver->fini(mlxsw_core->driver_priv); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); + kfree(mlxsw_core->lag.mapping); free_percpu(mlxsw_core->pcpu_stats); kfree(mlxsw_core); mlxsw_core_driver_put(device_kind); @@ -1188,11 +1211,25 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; struct mlxsw_core_pcpu_stats *pcpu_stats; - u8 local_port = rx_info->sys_port; + u8 local_port; bool found = false; - dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", - __func__, 
rx_info->sys_port, rx_info->trap_id); + if (rx_info->is_lag) { + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n", + __func__, rx_info->u.lag_id, + rx_info->trap_id); + /* Upper layer does not care if the skb came from LAG or not, + * so just get the local_port for the lag port and push it up. + */ + local_port = mlxsw_core_lag_mapping_get(mlxsw_core, + rx_info->u.lag_id, + rx_info->lag_port_index); + } else { + local_port = rx_info->u.sys_port; + } + + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n", + __func__, local_port, rx_info->trap_id); if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || (local_port >= MLXSW_PORT_MAX_PORTS)) @@ -1236,6 +1273,48 @@ drop: } EXPORT_SYMBOL(mlxsw_core_skb_receive); +static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) +{ + return mlxsw_core->driver->profile->max_port_per_lag * lag_id + + port_index; +} + +void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index, u8 local_port) +{ + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, port_index); + + mlxsw_core->lag.mapping[index] = local_port; +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); + +u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) +{ + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, port_index); + + return mlxsw_core->lag.mapping[index]; +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); + +void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 local_port) +{ + int i; + + for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) { + int index = mlxsw_core_lag_mapping_index(mlxsw_core, + lag_id, i); + + if (mlxsw_core->lag.mapping[index] == local_port) + mlxsw_core->lag.mapping[index] = 0; + } +} +EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); + int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 807827350a89..a01723600f0a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -112,13 +112,25 @@ int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload); struct mlxsw_rx_info { - u16 sys_port; + bool is_lag; + union { + u16 sys_port; + u16 lag_id; + } u; + u8 lag_port_index; int trap_id; }; void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_info *rx_info); +void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index, u8 local_port); +u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index); +void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 local_port); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { @@ -209,4 +221,24 @@ struct mlxsw_bus_info { u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; }; +struct mlxsw_hwmon; + +#ifdef CONFIG_MLXSW_CORE_HWMON + +int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon); +void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon); + +#else + +static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon) +{ + return 0; 
+} + +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c new file mode 100644 index 000000000000..1ac8bf187168 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -0,0 +1,372 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/hwmon.h> +#include <linux/err.h> + +#include "core.h" + +#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 +#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ + MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX) + +struct mlxsw_hwmon_attr { + struct device_attribute dev_attr; + struct mlxsw_hwmon *hwmon; + unsigned int type_index; + char name[32]; +}; + +struct mlxsw_hwmon { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct device *hwmon_dev; + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1]; + struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT]; + unsigned int attrs_count; +}; + +static ssize_t mlxsw_hwmon_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + return sprintf(buf, "%u\n", temp); +} + +static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp_max; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL); + return sprintf(buf, "%u\n", temp_max); +} + +static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val != 1) + return -EINVAL; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, true, true); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n"); + return err; + } + return len; +} + +static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsm_pl[MLXSW_REG_MFSM_LEN]; + int err; + + mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); + return err; + } + return sprintf(buf, "%u\n", 
mlxsw_reg_mfsm_rpm_get(mfsm_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + int err; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n"); + return err; + } + return sprintf(buf, "%u\n", + mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val > 255) + return -EINVAL; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n"); + return err; + } + return len; +} + +enum mlxsw_hwmon_attr_type { + MLXSW_HWMON_ATTR_TYPE_TEMP, + MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, + MLXSW_HWMON_ATTR_TYPE_TEMP_RST, + MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + MLXSW_HWMON_ATTR_TYPE_PWM, +}; + +static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, + enum mlxsw_hwmon_attr_type attr_type, + unsigned int type_index, unsigned int num) { + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr; + unsigned int attr_index; + + attr_index = mlxsw_hwmon->attrs_count; + mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index]; + + switch (attr_type) { + case MLXSW_HWMON_ATTR_TYPE_TEMP: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_highest", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_RST: + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_reset_history", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_FAN_RPM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "fan%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_PWM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "pwm%u", num + 1); + break; + default: + WARN_ON(1); + } + + mlxsw_hwmon_attr->type_index = type_index; + mlxsw_hwmon_attr->hwmon = mlxsw_hwmon; + mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name; + sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr); + + mlxsw_hwmon->attrs[attr_index] = 
&mlxsw_hwmon_attr->dev_attr.attr; + mlxsw_hwmon->attrs_count++; +} + +static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + char mtcap_pl[MLXSW_REG_MTCAP_LEN]; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + u8 sensor_count; + int i; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtcap), mtcap_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); + return err; + } + sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < sensor_count; i++) { + mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true); + err = mlxsw_reg_write(mlxsw_hwmon->core, + MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n", + i); + return err; + } + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP, i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i); + } + return 0; +} + +static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + char mfcr_pl[MLXSW_REG_MFCR_LEN]; + enum mlxsw_reg_mfcr_pwm_frequency freq; + unsigned int type_index; + unsigned int num; + u16 tacho_active; + u8 pwm_active; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfcr), mfcr_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get to probe PWMs and Tachometers\n"); + return err; + } + mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); + num = 0; + for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { + if (tacho_active & BIT(type_index)) + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + type_index, num++); + } + num = 0; + for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { + if (pwm_active & BIT(type_index)) + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_PWM, + type_index, num++); + } + return 0; +} + +int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon) +{ + struct mlxsw_hwmon *mlxsw_hwmon; + struct device *hwmon_dev; + int err; + + mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon), + GFP_KERNEL); + if (!mlxsw_hwmon) + return -ENOMEM; + mlxsw_hwmon->core = mlxsw_core; + mlxsw_hwmon->bus_info = mlxsw_bus_info; + + err = mlxsw_hwmon_temp_init(mlxsw_hwmon); + if (err) + goto err_temp_init; + + err = mlxsw_hwmon_fans_init(mlxsw_hwmon); + if (err) + goto err_fans_init; + + mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; + mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev, + "mlxsw", + mlxsw_hwmon, + mlxsw_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + err = PTR_ERR(hwmon_dev); + goto err_hwmon_register; + } + + mlxsw_hwmon->hwmon_dev = hwmon_dev; + *p_hwmon = mlxsw_hwmon; + return 0; + +err_hwmon_register: +err_fans_init: +err_temp_init: + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index de69e719dc9d..c071077aafbd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -384,7 +384,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, /* Set CQ of same number of this SDQ. 
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); - mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7); + mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -686,11 +686,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - /* We do not support lag now */ - if (mlxsw_pci_cqe_lag_get(cqe)) - goto drop; + if (mlxsw_pci_cqe_lag_get(cqe)) { + rx_info.is_lag = true; + rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); + rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); + } else { + rx_info.is_lag = false; + rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); + } - rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe); @@ -699,7 +703,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); -put_new_skb: memset(wqe, 0, q->elem_size); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); if (err) @@ -708,10 +711,6 @@ put_new_skb: q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return; - -drop: - dev_kfree_skb_any(skb); - goto put_new_skb; } static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 142f33d978c5..912106054ff2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -129,13 +129,15 @@ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); */ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); -/* pci_cqe_system_port +/* pci_cqe_system_port/lag_id * When lag=0: System port on which the packet was received * When lag=1: * bits [15:4] LAG ID on which the packet was received * bits [3:0] sub_port on which the packet was received */ MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); +MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); /* pci_cqe_wqe_counter * WQE count of the WQEs completed on the associated dqn diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 236fb5d2ad69..0c5237264e3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,6 +99,55 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); +/* SMID - Switch Multicast ID + * -------------------------- + * The MID record maps from a MID (Multicast ID), which is a unique identifier + * of the multicast group within the stacking domain, into a list of local + * ports into which the packet is replicated. + */ +#define MLXSW_REG_SMID_ID 0x2007 +#define MLXSW_REG_SMID_LEN 0x240 + +static const struct mlxsw_reg_info mlxsw_reg_smid = { + .id = MLXSW_REG_SMID_ID, + .len = MLXSW_REG_SMID_LEN, +}; + +/* reg_smid_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); + +/* reg_smid_mid + * Multicast identifier - global identifier that represents the multicast group + * across all devices. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); + +/* reg_smid_port + * Local port membership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); + +/* reg_smid_port_mask + * Local port mask (1 bit per port). + * Access: W + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); + +static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, + u8 port, bool set) +{ + MLXSW_REG_ZERO(smid, payload); + mlxsw_reg_smid_swid_set(payload, 0); + mlxsw_reg_smid_mid_set(payload, mid); + mlxsw_reg_smid_port_set(payload, port, set); + mlxsw_reg_smid_port_mask_set(payload, port, 1); +} + /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -286,6 +335,8 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, + MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, + MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, }; /* reg_sfd_rec_type @@ -376,36 +427,144 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, MLXSW_REG_SFD_REC_LEN, 0x0C, false); -static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, - enum mlxsw_reg_sfd_rec_policy policy, - const char *mac, u16 vid, - enum mlxsw_reg_sfd_rec_action action, - u8 local_port) +static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_type rec_type, + const char *mac, + enum mlxsw_reg_sfd_rec_action action) { u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload); if (rec_index >= num_rec) mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); - mlxsw_reg_sfd_rec_type_set(payload, rec_index, - MLXSW_REG_SFD_REC_TYPE_UNICAST); - mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type); mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); - mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); - mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid); mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); +} + +static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, + u8 local_port) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid); mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); } static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, - char *mac, u16 *p_vid, + char *mac, u16 *p_fid_vid, u8 *p_local_port) { mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); + *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); } +/* reg_sfd_uc_lag_sub_port + * LAG sub port. + * Must be 0 if multichannel VEPA is not enabled.
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_fid_vid + * Filtering ID or VLAN ID + * For SwitchX and SwitchX-2: + * - Dynamic entries (policy 2,3) use FID + * - Static entries (policy 0) use VID + * - When independent learning is configured, VID=FID + * For Spectrum: use FID for both Dynamic and Static entries. + * VID should not be used. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_lag_vid + * Indicates VID in case of vFIDs. Reserved for FIDs. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_lag_lag_id + * LAG Identifier - pointer into the LAG descriptor table. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 lag_vid, + u16 lag_id) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG, + mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid); + mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id); +} + +static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u16 *p_lag_id) +{ + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index); + *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); +} + +/* reg_sfd_mc_pgi + * + * Multicast port group index - index into the port group table. + * Value 0x1FFF indicates the pgi should point to the MID entry. + * For Spectrum this value must be set to 0x1FFF + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_fid_vid + * + * Filtering ID or VLAN ID + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_mid + * + * Multicast identifier - global identifier that represents the multicast + * group across all devices. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 mid) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action); + mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF); + mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -456,8 +615,12 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, enum mlxsw_reg_sfn_rec_type { /* MAC addresses learned on a regular port. 
*/ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5, - /* Aged-out MAC address on a regular port */ + /* MAC addresses learned on a LAG port. */ + MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6, + /* Aged-out MAC address on a regular port. */ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, + /* Aged-out MAC address on a LAG port. */ + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8, }; /* reg_sfn_rec_type @@ -505,6 +668,22 @@ static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index); } +/* reg_sfn_mac_lag_lag_id + * LAG ID (pointer into the LAG descriptor table). + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10, + MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u16 *p_lag_id) +{ + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); + *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. @@ -865,6 +1044,293 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_port_mask_set(payload, port, 1); } +/* SLDR - Switch LAG Descriptor Register + * ----------------------------------------- + * The switch LAG descriptor register is populated by LAG descriptors. + * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to + * max_lag-1. + */ +#define MLXSW_REG_SLDR_ID 0x2014 +#define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */ + +static const struct mlxsw_reg_info mlxsw_reg_sldr = { + .id = MLXSW_REG_SLDR_ID, + .len = MLXSW_REG_SLDR_LEN, +}; + +enum mlxsw_reg_sldr_op { + /* Indicates a creation of a new LAG-ID, lag_id must be valid */ + MLXSW_REG_SLDR_OP_LAG_CREATE, + MLXSW_REG_SLDR_OP_LAG_DESTROY, + /* Ports that appear in the list have the Distributor enabled */ + MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST, + /* Removes ports from the distributor list */ + MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST, +}; + +/* reg_sldr_op + * Operation. + * Access: RW + */ +MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3); + +/* reg_sldr_lag_id + * LAG identifier. The lag_id is the index into the LAG descriptor table. + * Access: Index + */ +MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10); + +static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); +} + +static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); +} + +/* reg_sldr_num_ports + * The number of member ports of the LAG. + * Reserved for Create / Destroy operations + * For Add / Remove operations - indicates the number of ports in the list. + * Access: RW + */ +MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8); + +/* reg_sldr_system_port + * System port.
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false); + +static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +/* SLCR - Switch LAG Configuration 2 Register + * ------------------------------------------- + * The Switch LAG Configuration register is used for configuring the + * LAG properties of the switch. + */ +#define MLXSW_REG_SLCR_ID 0x2015 +#define MLXSW_REG_SLCR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcr = { + .id = MLXSW_REG_SLCR_ID, + .len = MLXSW_REG_SLCR_LEN, +}; + +enum mlxsw_reg_slcr_pp { + /* Global Configuration (for all ports) */ + MLXSW_REG_SLCR_PP_GLOBAL, + /* Per port configuration, based on local_port field */ + MLXSW_REG_SLCR_PP_PER_PORT, +}; + +/* reg_slcr_pp + * Per Port Configuration + * Note: Reading at Global mode results in reading port 1 configuration. + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1); + +/* reg_slcr_local_port + * Local port number + * Supported from CPU port + * Not supported from router port + * Reserved when pp = Global Configuration + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8); + +enum mlxsw_reg_slcr_type { + MLXSW_REG_SLCR_TYPE_CRC, /* default */ + MLXSW_REG_SLCR_TYPE_XOR, + MLXSW_REG_SLCR_TYPE_RANDOM, +}; + +/* reg_slcr_type + * Hash type + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); + +/* Ingress port */ +#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0) +/* SMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1) +/* SMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2) +#define MLXSW_REG_SLCR_LAG_HASH_SMAC \ + (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP) +/* DMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3) +/* DMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4) +#define MLXSW_REG_SLCR_LAG_HASH_DMAC \ + (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP) +/* Ethertype - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5) +/* Ethertype - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6) +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \ + (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \ + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP) +/* VLAN ID - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7) +/* VLAN ID - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8) +#define MLXSW_REG_SLCR_LAG_HASH_VLANID \ + (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \ + MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP) +/* Source IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9) +/* Destination IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10) +/* TCP/UDP source port */ +#define 
MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11) +/* TCP/UDP destination port*/ +#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12) +/* IPv4 Protocol/IPv6 Next Header */ +#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13) +/* IPv6 Flow label */ +#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14) +/* SID - FCoE source ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15) +/* DID - FCoE destination ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16) +/* OXID - FCoE originator exchange ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17) +/* Destination QP number - for RoCE packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19) + +/* reg_slcr_lag_hash + * LAG hashing configuration. This is a bitmask, in which each set + * bit includes the corresponding item in the LAG hash calculation. + * The default lag_hash contains SMAC, DMAC, VLANID and + * Ethertype (for all packet types). + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +{ + MLXSW_REG_ZERO(slcr, payload); + mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); + mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR); + mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); +} + +/* SLCOR - Switch LAG Collector Register + * ------------------------------------- + * The Switch LAG Collector register controls the Local Port membership + * in a LAG and enablement of the collector. + */ +#define MLXSW_REG_SLCOR_ID 0x2016 +#define MLXSW_REG_SLCOR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcor = { + .id = MLXSW_REG_SLCOR_ID, + .len = MLXSW_REG_SLCOR_LEN, +}; + +enum mlxsw_reg_slcor_col { + /* Port is added with collector disabled */ + MLXSW_REG_SLCOR_COL_LAG_ADD_PORT, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED, + MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT, +}; + +/* reg_slcor_col + * Collector configuration + * Access: RW + */ +MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2); + +/* reg_slcor_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8); + +/* reg_slcor_lag_id + * LAG Identifier. Index into the LAG descriptor table. + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10); + +/* reg_slcor_port_index + * Port index in the LAG list. Only valid on Add Port to LAG col. 
+ * Valid range is from 0 to cap_max_lag_members-1 + * Access: RW + */ +MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10); + +static inline void mlxsw_reg_slcor_pack(char *payload, + u8 local_port, u16 lag_id, + enum mlxsw_reg_slcor_col col) +{ + MLXSW_REG_ZERO(slcor, payload); + mlxsw_reg_slcor_col_set(payload, col); + mlxsw_reg_slcor_local_port_set(payload, local_port); + mlxsw_reg_slcor_lag_id_set(payload, lag_id); +} + +static inline void mlxsw_reg_slcor_port_add_pack(char *payload, + u8 local_port, u16 lag_id, + u8 port_index) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_ADD_PORT); + mlxsw_reg_slcor_port_index_set(payload, port_index); +} + +static inline void mlxsw_reg_slcor_port_remove_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT); +} + +static inline void mlxsw_reg_slcor_col_enable_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); +} + +static inline void mlxsw_reg_slcor_col_disable_pack(char *payload, + u8 local_port, u16 lag_id) +{ + mlxsw_reg_slcor_pack(payload, local_port, lag_id, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); +} + /* SPMLR - Switch Port MAC Learning Register * ----------------------------------------- * Controls the Switch MAC learning policy per port. @@ -2087,6 +2553,284 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* MFCR - Management Fan Control Register + * -------------------------------------- + * This register controls the settings of the Fan Speed PWM mechanism. + */ +#define MLXSW_REG_MFCR_ID 0x9001 +#define MLXSW_REG_MFCR_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfcr = { + .id = MLXSW_REG_MFCR_ID, + .len = MLXSW_REG_MFCR_LEN, +}; + +enum mlxsw_reg_mfcr_pwm_frequency { + MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00, + MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01, + MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02, + MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40, + MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41, + MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42, + MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43, + MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44, +}; + +/* reg_mfcr_pwm_frequency + * Controls the frequency of the PWM signal. + * Access: RW + */ +MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6); + +#define MLXSW_MFCR_TACHOS_MAX 10 + +/* reg_mfcr_tacho_active + * Indicates which of the tachometer is active (bit per tachometer). + * Access: RO + */ +MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX); + +#define MLXSW_MFCR_PWMS_MAX 5 + +/* reg_mfcr_pwm_active + * Indicates which of the PWM control is active (bit per PWM). 
+ * Access: RO + */ +MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX); + +static inline void +mlxsw_reg_mfcr_pack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency) +{ + MLXSW_REG_ZERO(mfcr, payload); + mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency); +} + +static inline void +mlxsw_reg_mfcr_unpack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency, + u16 *p_tacho_active, u8 *p_pwm_active) +{ + *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload); + *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload); + *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload); +} + +/* MFSC - Management Fan Speed Control Register + * -------------------------------------------- + * This register controls the settings of the Fan Speed PWM mechanism. + */ +#define MLXSW_REG_MFSC_ID 0x9002 +#define MLXSW_REG_MFSC_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsc = { + .id = MLXSW_REG_MFSC_ID, + .len = MLXSW_REG_MFSC_LEN, +}; + +/* reg_mfsc_pwm + * Fan pwm to control / monitor. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3); + +/* reg_mfsc_pwm_duty_cycle + * Controls the duty cycle of the PWM. Value range from 0..255 to + * represent duty cycle of 0%...100%. + * Access: RW + */ +MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8); + +static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm, + u8 pwm_duty_cycle) +{ + MLXSW_REG_ZERO(mfsc, payload); + mlxsw_reg_mfsc_pwm_set(payload, pwm); + mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle); +} + +/* MFSM - Management Fan Speed Measurement + * --------------------------------------- + * This register controls the settings of the Tacho measurements and + * enables reading the Tachometer measurements. + */ +#define MLXSW_REG_MFSM_ID 0x9003 +#define MLXSW_REG_MFSM_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsm = { + .id = MLXSW_REG_MFSM_ID, + .len = MLXSW_REG_MFSM_LEN, +}; + +/* reg_mfsm_tacho + * Fan tachometer index. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4); + +/* reg_mfsm_rpm + * Fan speed (round per minute). + * Access: RO + */ +MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16); + +static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho) +{ + MLXSW_REG_ZERO(mfsm, payload); + mlxsw_reg_mfsm_tacho_set(payload, tacho); +} + +/* MTCAP - Management Temperature Capabilities + * ------------------------------------------- + * This register exposes the capabilities of the device and + * system temperature sensing. + */ +#define MLXSW_REG_MTCAP_ID 0x9009 +#define MLXSW_REG_MTCAP_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mtcap = { + .id = MLXSW_REG_MTCAP_ID, + .len = MLXSW_REG_MTCAP_LEN, +}; + +/* reg_mtcap_sensor_count + * Number of sensors supported by the device. + * This includes the QSFP module sensors (if exists in the QSFP module). + * Access: RO + */ +MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); + +/* MTMP - Management Temperature + * ----------------------------- + * This register controls the settings of the temperature measurements + * and enables reading the temperature measurements. Note that temperature + * is in 0.125 degrees Celsius. + */ +#define MLXSW_REG_MTMP_ID 0x900A +#define MLXSW_REG_MTMP_LEN 0x20 + +static const struct mlxsw_reg_info mlxsw_reg_mtmp = { + .id = MLXSW_REG_MTMP_ID, + .len = MLXSW_REG_MTMP_LEN, +}; + +/* reg_mtmp_sensor_index + * Sensors index to access. 
+ * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially + * (module 0 is mapped to sensor_index 64). + * Access: Index + */ +MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7); + +/* Convert to milli degrees Celsius */ +#define MLXSW_REG_MTMP_TEMP_TO_MC(val) (val * 125) + +/* reg_mtmp_temperature + * Temperature reading from the sensor. Reading is in 0.125 Celsius + * degrees units. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16); + +/* reg_mtmp_mte + * Max Temperature Enable - enables measuring the max temperature on a sensor. + * Access: RW + */ +MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1); + +/* reg_mtmp_mtr + * Max Temperature Reset - clears the value of the max temperature register. + * Access: WO + */ +MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1); + +/* reg_mtmp_max_temperature + * The highest measured temperature from the sensor. + * When the bit mte is cleared, the field max_temperature is reserved. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16); + +#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8 + +/* reg_mtmp_sensor_name + * Sensor Name + * Access: RO + */ +MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE); + +static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index, + bool max_temp_enable, + bool max_temp_reset) +{ + MLXSW_REG_ZERO(mtmp, payload); + mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); + mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); + mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); +} + +static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, + unsigned int *p_max_temp, + char *sensor_name) +{ + u16 temp; + + if (p_temp) { + temp = mlxsw_reg_mtmp_temperature_get(payload); + *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (p_max_temp) { + temp = mlxsw_reg_mtmp_max_temperature_get(payload); + *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (sensor_name) + mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); +} + +/* MLCR - Management LED Control Register + * -------------------------------------- + * Controls the system LEDs. + */ +#define MLXSW_REG_MLCR_ID 0x902B +#define MLXSW_REG_MLCR_LEN 0x0C + +static const struct mlxsw_reg_info mlxsw_reg_mlcr = { + .id = MLXSW_REG_MLCR_ID, + .len = MLXSW_REG_MLCR_LEN, +}; + +/* reg_mlcr_local_port + * Local port number. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8); + +#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF + +/* reg_mlcr_beacon_duration + * Duration of the beacon to be active, in seconds. + * 0x0 - Will turn off the beacon. + * 0xFFFF - Will turn on the beacon until explicitly turned off. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16); + +/* reg_mlcr_beacon_remain + * Remaining duration of the beacon, in seconds. + * 0xFFFF indicates an infinite amount of time. + * Access: RO + */ +MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16); + +static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, + bool active) +{ + MLXSW_REG_ZERO(mlcr, payload); + mlxsw_reg_mlcr_local_port_set(payload, local_port); + mlxsw_reg_mlcr_beacon_duration_set(payload, active ? + MLXSW_REG_MLCR_DURATION_MAX : 0); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. 
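A quick worked example of the MTMP unit handling defined above (not part of the patch): the register reports temperature in 0.125 degree Celsius steps and MLXSW_REG_MTMP_TEMP_TO_MC scales that to milli-degrees, which is the value the hwmon attributes end up exposing. The macro name and the sample raw value below are local to this sketch.

#include <assert.h>

/* Mirrors the MLXSW_REG_MTMP_TEMP_TO_MC conversion: 1 LSB = 0.125 degC = 125 mdegC. */
#define MTMP_TEMP_TO_MC(val) ((val) * 125)

int main(void)
{
	unsigned int raw = 400;	/* hypothetical raw MTMP reading: 400 * 0.125 degC = 50 degC */

	assert(MTMP_TEMP_TO_MC(raw) == 50000);	/* 50 degC expressed in milli-degrees */
	return 0;
}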
@@ -2357,6 +3101,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; + case MLXSW_REG_SMID_ID: + return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; case MLXSW_REG_SFDAT_ID: @@ -2375,6 +3121,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SLDR_ID: + return "SLDR"; + case MLXSW_REG_SLCR_ID: + return "SLCR"; + case MLXSW_REG_SLCOR_ID: + return "SLCOR"; case MLXSW_REG_SPMLR_ID: return "SPMLR"; case MLXSW_REG_SVFA_ID: @@ -2405,6 +3157,18 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_MFCR_ID: + return "MFCR"; + case MLXSW_REG_MFSC_ID: + return "MFSC"; + case MLXSW_REG_MFSM_ID: + return "MFSM"; + case MLXSW_REG_MTCAP_ID: + return "MTCAP"; + case MLXSW_REG_MTMP_ID: + return "MTMP"; + case MLXSW_REG_MLCR_ID: + return "MLCR"; case MLXSW_REG_SBPR_ID: return "SBPR"; case MLXSW_REG_SBCM_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3be4a2355ead..ce6845d534a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -48,6 +48,7 @@ #include <linux/workqueue.h> #include <linux/jiffies.h> #include <linux/bitops.h> +#include <linux/list.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } -static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - int err; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - - if (err) - return err; - - set_bit(vfid, mlxsw_sp->active_vfids); - return 0; -} - -static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - clear_bit(vfid, mlxsw_sp->active_vfids); - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned char *addr) { @@ -417,6 +391,10 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static void mlxsw_sp_set_rx_mode(struct net_device *dev) +{ +} + static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -545,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static struct mlxsw_sp_vfid * +mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { + if (vfid->vid == vid) + return vfid; + } + + return NULL; +} + +static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) +{ + return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, + MLXSW_SP_VFID_PORT_MAX); +} + +static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 
u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + u16 vid) +{ + struct device *dev = mlxsw_sp->bus_info->dev; + struct mlxsw_sp_vfid *vfid; + u16 n_vfid; + int err; + + n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { + dev_err(dev, "No available vFIDs\n"); + return ERR_PTR(-ERANGE); + } + + err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + if (err) { + dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + return ERR_PTR(err); + } + + vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); + if (!vfid) + goto err_allocate_vfid; + + vfid->vfid = n_vfid; + vfid->vid = vid; + + list_add(&vfid->list, &mlxsw_sp->port_vfids.list); + set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); + + return vfid; + +err_allocate_vfid: + __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + return ERR_PTR(-ENOMEM); +} + +static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vfid *vfid) +{ + clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); + list_del(&vfid->list); + + __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + + kfree(vfid); +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_vfid *vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL); + if (!mlxsw_sp_vport) + return NULL; + + /* dev will be set correctly after the VLAN device is linked + * with the real device. In case of bridge SELF invocation, dev + * will remain as is. + */ + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; + mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port; + mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; + mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; + mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; + mlxsw_sp_vport->vport.vfid = vfid; + mlxsw_sp_vport->vport.vid = vfid->vid; + + list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); + + return mlxsw_sp_vport; +} + +static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + list_del(&mlxsw_sp_vport->vport.list); + kfree(mlxsw_sp_vport); +} + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *sftr_pl; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is added to HW filter when device goes up, but it is @@ -559,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (test_bit(vid, mlxsw_sp_port->active_vfids)) { + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { netdev_warn(dev, "VID=%d already configured\n", vid); return 0; } - if (!test_bit(vid, mlxsw_sp->active_vfids)) { - err = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (err) { - netdev_err(dev, "Failed to create vFID=%d\n", - MLXSW_SP_VFID_BASE + vid); - return err; + vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); + if (!vfid) { + vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create vFID for VID=%d\n", + vid); + return PTR_ERR(vfid); } + } - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) { - err = 
-ENOMEM; - goto err_flood_table_alloc; - } - mlxsw_reg_sftr_pack(sftr_pl, 0, vid, - MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, - MLXSW_PORT_CPU_PORT, true); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); - kfree(sftr_pl); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + if (!mlxsw_sp_vport) { + netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); + err = -ENOMEM; + goto err_port_vport_create; + } + + if (!vfid->nr_vports) { + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, + true, false); if (err) { - netdev_err(dev, "Failed to configure flood table\n"); - goto err_flood_table_config; + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_vport_flood_set; } } - /* In case we fail in the following steps, we intentionally do not - * destroy the associated vFID. - */ - /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit * {Port, VID} to FID mappings and set the port's mode to Virtual mode. */ - if (!mlxsw_sp_port->nr_vfids) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to Virtual mode\n"); - return err; + goto err_port_vp_mode_trans; } } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, MLXSW_SP_VFID_BASE + vid, vid); + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); if (err) { netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); goto err_port_vid_to_fid_set; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); if (err) { netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); goto err_port_vid_learning_set; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); goto err_port_add_vid; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_FORWARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); goto err_port_stp_state_set; } - mlxsw_sp_port->nr_vfids++; - set_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports++; return 0; -err_flood_table_config: -err_flood_table_alloc: - mlxsw_sp_vfid_destroy(mlxsw_sp, vid); - return err; - err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - MLXSW_SP_VFID_BASE + vid, vid); + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); err_port_vid_to_fid_set: - mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); +err_port_vp_mode_trans: + if (!vfid->nr_vports) + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, + false); +err_vport_flood_set: + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 
+err_port_vport_create: + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; } @@ -660,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is removed from HW filter when device goes down, but @@ -668,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, if (!vid) return 0; - if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { netdev_warn(dev, "VID=%d does not exist\n", vid); return 0; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + vfid = mlxsw_sp_vport->vport.vfid; + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_DISCARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); if (err) { netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, MLXSW_SP_VFID_BASE + vid, + false, + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); if (err) { netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); return err; } @@ -707,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. */ - if (mlxsw_sp_port->nr_vfids == 1) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to VLAN mode\n"); @@ -715,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, } } - mlxsw_sp_port->nr_vfids--; - clear_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports--; + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); + + /* Destroy the vFID if no vPorts are assigned to it anymore. 
*/ + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); return 0; } @@ -725,6 +838,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, .ndo_change_mtu = mlxsw_sp_port_change_mtu, .ndo_get_stats64 = mlxsw_sp_port_get_stats64, @@ -859,6 +973,29 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, } } +static int mlxsw_sp_port_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mlcr_pl[MLXSW_REG_MLCR_LEN]; + bool active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + active = true; + break; + case ETHTOOL_ID_INACTIVE: + active = false; + break; + default: + return -EOPNOTSUPP; + } + + mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); +} + static void mlxsw_sp_port_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -1205,6 +1342,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlxsw_sp_port_get_strings, + .set_phys_id = mlxsw_sp_port_set_phys_id, .get_ethtool_stats = mlxsw_sp_port_get_stats, .get_sset_count = mlxsw_sp_port_get_sset_count, .get_settings = mlxsw_sp_port_get_settings, @@ -1216,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *dev; bool usable; + size_t bytes; int err; dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1225,10 +1364,18 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; - mlxsw_sp_port->learning = 1; - mlxsw_sp_port->learning_sync = 1; - mlxsw_sp_port->uc_flood = 1; - mlxsw_sp_port->pvid = 1; + bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); + mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->active_vlans) { + err = -ENOMEM; + goto err_port_active_vlans_alloc; + } + mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->untagged_vlans) { + err = -ENOMEM; + goto err_port_untagged_vlans_alloc; + } + INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1330,16 +1477,29 @@ err_port_module_check: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: + kfree(mlxsw_sp_port->untagged_vlans); +err_port_untagged_vlans_alloc: + kfree(mlxsw_sp_port->active_vlans); +err_port_active_vlans_alloc: free_netdev(dev); return err; } -static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) { - u16 vfid; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); + list_for_each_entry_safe(mlxsw_sp_vport, tmp, + &mlxsw_sp_port->vports_list, vport.list) { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + /* vPorts created for VLAN devices should already be gone + * by now, since we 
unregistered the port netdev. + */ + WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); + mlxsw_sp_port_kill_vid(dev, 0, vid); + } } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) @@ -1348,10 +1508,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) if (!mlxsw_sp_port) return; - mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); + kfree(mlxsw_sp_port->untagged_vlans); + kfree(mlxsw_sp_port->active_vlans); free_netdev(mlxsw_sp_port->dev); } @@ -1633,16 +1795,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, enum mlxsw_sp_flood_table flood_table; char sfgc_pl[MLXSW_REG_SFGC_LEN]; - if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) { + if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - flood_table = 0; - } else { + else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) - flood_table = MLXSW_SP_FLOOD_TABLE_UC; - else - flood_table = MLXSW_SP_FLOOD_TABLE_BM; - } + + if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + flood_table = MLXSW_SP_FLOOD_TABLE_UC; + else + flood_table = MLXSW_SP_FLOOD_TABLE_BM; mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, flood_table); @@ -1653,9 +1814,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) { int type, err; - /* For non-offloaded netdevs, flood all traffic types to CPU - * port. - */ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { if (type == MLXSW_REG_SFGC_TYPE_RESERVED) continue; @@ -1664,15 +1822,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); if (err) return err; - } - - /* For bridged ports, use one flooding table for unknown unicast - * traffic and a second table for unregistered multicast and - * broadcast. 
- */ - for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { - if (type == MLXSW_REG_SFGC_TYPE_RESERVED) - continue; err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); @@ -1683,6 +1832,22 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) +{ + char slcr_pl[MLXSW_REG_SLCR_LEN]; + + mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | + MLXSW_REG_SLCR_LAG_HASH_DMAC | + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | + MLXSW_REG_SLCR_LAG_HASH_VLANID | + MLXSW_REG_SLCR_LAG_HASH_SIP | + MLXSW_REG_SLCR_LAG_HASH_DIP | + MLXSW_REG_SLCR_LAG_HASH_SPORT | + MLXSW_REG_SLCR_LAG_HASH_DPORT | + MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); +} + static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -1691,6 +1856,9 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { @@ -1701,7 +1869,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - goto err_ports_create; + return err; } err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); @@ -1728,6 +1896,12 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, goto err_buffers_init; } + err = mlxsw_sp_lag_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); + goto err_lag_init; + } + err = mlxsw_sp_switchdev_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); @@ -1737,6 +1911,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, return 0; err_switchdev_init: +err_lag_init: err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); @@ -1744,8 +1919,6 @@ err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); err_event_register: mlxsw_sp_ports_remove(mlxsw_sp); -err_ports_create: - mlxsw_sp_vfids_fini(mlxsw_sp); return err; } @@ -1757,18 +1930,17 @@ static void mlxsw_sp_fini(void *priv) mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); - mlxsw_sp_vfids_fini(mlxsw_sp); } static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_lag = 1, - .max_lag = 64, + .max_lag = MLXSW_SP_LAG_MAX, .used_max_port_per_lag = 1, - .max_port_per_lag = 16, + .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, .used_max_mid = 1, - .max_mid = 7000, + .max_mid = MLXSW_SP_MID_MAX, .used_max_pgt = 1, .max_pgt = 0, .used_max_system_port = 1, @@ -1782,8 +1954,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .flood_mode = 3, .max_fid_offset_flood_tables = 2, .fid_offset_flood_table_size = VLAN_N_VID - 1, - .max_fid_flood_tables = 1, - .fid_flood_table_size = VLAN_N_VID, + .max_fid_flood_tables = 2, + .fid_flood_table_size = MLXSW_SP_VFID_MAX, .used_max_ib_mc = 1, .max_ib_mc = 0, .used_max_pkey = 1, @@ -1824,24 +1996,29 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) */ err = mlxsw_sp_port_kill_vid(dev, 0, 1); if (err) - netdev_err(dev, 
"Failed to remove VID 1\n"); + return err; - return err; + mlxsw_sp_port->learning = 1; + mlxsw_sp_port->learning_sync = 1; + mlxsw_sp_port->uc_flood = 1; + mlxsw_sp_port->bridged = 1; + + return 0; } static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; - int err; + + mlxsw_sp_port->learning = 0; + mlxsw_sp_port->learning_sync = 0; + mlxsw_sp_port->uc_flood = 0; + mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to add VID 1\n"); - - return err; + return mlxsw_sp_port_add_vid(dev, 0, 1); } static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, @@ -1865,19 +2042,293 @@ static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->master_bridge.dev = NULL; } -static int mlxsw_sp_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id, u8 port_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id, port_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + u16 *p_lag_id) +{ + struct mlxsw_sp_upper *lag; + int free_lag_id = -1; + int i; + + for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { + lag = mlxsw_sp_lag_get(mlxsw_sp, i); + if (lag->ref_count) { + if (lag->dev == lag_dev) { + *p_lag_id = i; + return 0; + } + } else if (free_lag_id < 0) { + free_lag_id = i; + } + } + if (free_lag_id < 0) + return -EBUSY; + *p_lag_id = free_lag_id; + return 0; +} + +static bool +mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + struct netdev_lag_upper_info 
*lag_upper_info) +{ + u16 lag_id; + + if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) + return false; + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return false; + return true; +} + +static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, + u16 lag_id, u8 *p_port_index) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { + *p_port_index = i; + return 0; + } + } + return -EBUSY; +} + +static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_upper *lag; + u16 lag_id; + u8 port_index; + int err; + + err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); + if (err) + return err; + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); + if (!lag->ref_count) { + err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); + if (err) + return err; + lag->dev = lag_dev; + } + + err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); + if (err) + return err; + err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); + if (err) + goto err_col_port_add; + err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); + if (err) + goto err_col_port_enable; + + mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, + mlxsw_sp_port->local_port); + mlxsw_sp_port->lag_id = lag_id; + mlxsw_sp_port->lagged = 1; + lag->ref_count++; + return 0; + +err_col_port_enable: + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); +err_col_port_add: + if (!lag->ref_count) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); + return err; +} + +static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_upper *lag; + u16 lag_id = mlxsw_sp_port->lag_id; + int err; + + if (!mlxsw_sp_port->lagged) + return 0; + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); + WARN_ON(lag->ref_count == 0); + + err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + if (err) + return err; + err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); + if (err) + return err; + + if (lag->ref_count == 1) { + err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); + if (err) + return err; + } + + mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, + mlxsw_sp_port->local_port); + mlxsw_sp_port->lagged = 0; + lag->ref_count--; + return 0; +} + +static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, + mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, + mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool lag_tx_enabled) +{ + if (lag_tx_enabled) + return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + else + return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, + mlxsw_sp_port->lag_id); +} + +static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, + struct netdev_lag_lower_state_info *info) +{ 
+ return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); +} + +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev); + +static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid = vlan_dev_vlan_id(vlan_dev); + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return -EINVAL; + } + + mlxsw_sp_vport->dev = vlan_dev; + + return 0; +} + +static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid = vlan_dev_vlan_id(vlan_dev); + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return -EINVAL; + } + + /* When removing a VLAN device while still bridged we should first + * remove it from the bridge, as we receive the bridge's notification + * when the vPort is already gone. + */ + if (mlxsw_sp_vport->bridged) { + struct net_device *br_dev; + + br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); + } + + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; + + return 0; +} + +static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, + unsigned long event, void *ptr) { - struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; int err; - if (!mlxsw_sp_port_dev_check(dev)) - return NOTIFY_DONE; - mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; info = ptr; @@ -1885,28 +2336,66 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; + if (!info->master || !info->linking) + break; /* HW limitation forbids to put ports to multiple bridges. 
*/ - if (info->master && info->linking && - netif_is_bridge_master(upper_dev) && + if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) return NOTIFY_BAD; + if (netif_is_lag_master(upper_dev) && + !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, + info->upper_info)) + return NOTIFY_BAD; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (info->master && - netif_is_bridge_master(upper_dev)) { + if (is_vlan_dev(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to link VLAN device\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to unlink VLAN device\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_bridge_master(upper_dev)) { if (info->linking) { err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) + if (err) { netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - mlxsw_sp_port->bridged = 1; } else { err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); - if (err) - netdev_err(dev, "Failed to leave bridge\n"); - mlxsw_sp_port->bridged = 0; mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_lag_master(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_lag_join(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join link aggregation\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave link aggregation\n"); + return NOTIFY_BAD; + } } } break; @@ -1915,6 +2404,443 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } +static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changelowerstate_info *info; + struct mlxsw_sp_port *mlxsw_sp_port; + int err; + + mlxsw_sp_port = netdev_priv(dev); + info = ptr; + + switch (event) { + case NETDEV_CHANGELOWERSTATE: + if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { + err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, + info->lower_state_info); + if (err) + netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); + } + break; + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_port_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + switch (event) { + case NETDEV_PRECHANGEUPPER: + case NETDEV_CHANGEUPPER: + return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr); + case NETDEV_CHANGELOWERSTATE: + return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static struct mlxsw_sp_vfid * +mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { + if (vfid->br_dev == br_dev) + return vfid; + } + + return NULL; +} + 
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) +{ + return vfid - MLXSW_SP_VFID_PORT_MAX; +} + +static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) +{ + return MLXSW_SP_VFID_PORT_MAX + br_vfid; +} + +static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) +{ + return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, + MLXSW_SP_VFID_BR_MAX); +} + +static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + struct device *dev = mlxsw_sp->bus_info->dev; + struct mlxsw_sp_vfid *vfid; + u16 n_vfid; + int err; + + n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); + if (n_vfid == MLXSW_SP_VFID_MAX) { + dev_err(dev, "No available vFIDs\n"); + return ERR_PTR(-ERANGE); + } + + err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + if (err) { + dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + return ERR_PTR(err); + } + + vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); + if (!vfid) + goto err_allocate_vfid; + + vfid->vfid = n_vfid; + vfid->br_dev = br_dev; + + list_add(&vfid->list, &mlxsw_sp->br_vfids.list); + set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + + return vfid; + +err_allocate_vfid: + __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + return ERR_PTR(-ENOMEM); +} + +static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vfid *vfid) +{ + u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + + clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); + list_del(&vfid->list); + + __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + + kfree(vfid); +} + +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct net_device *dev = mlxsw_sp_vport->dev; + struct mlxsw_sp_vfid *vfid, *new_vfid; + int err; + + vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); + if (!vfid) { + WARN_ON(!vfid); + return -EINVAL; + } + + /* We need a vFID to go back to after leaving the bridge's vFID. */ + new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); + if (!new_vfid) { + new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (IS_ERR(new_vfid)) { + netdev_err(dev, "Failed to create vFID for VID=%d\n", + vid); + return PTR_ERR(new_vfid); + } + } + + /* Invalidate existing {Port, VID} to vFID mapping and create a new + * one for the new vFID. + */ + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", + vfid->vfid); + goto err_port_vid_to_fid_invalidate; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, + mlxsw_sp_vfid_to_fid(new_vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", + new_vfid->vfid); + goto err_port_vid_to_fid_validate; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + if (err) { + netdev_err(dev, "Failed to disable learning\n"); + goto err_port_vid_learning_set; + } + + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, + false); + if (err) { + netdev_err(dev, "Failed to clear flooding\n"); + goto err_vport_flood_set; + } + + /* Switch between the vFIDs and destroy the old one if needed. 
*/ + new_vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = new_vfid; + vfid->nr_vports--; + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; + + return 0; + +err_vport_flood_set: +err_port_vid_learning_set: +err_port_vid_to_fid_validate: +err_port_vid_to_fid_invalidate: + /* Rollback vFID only if new. */ + if (!new_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + return err; +} + +static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct net_device *dev = mlxsw_sp_vport->dev; + struct mlxsw_sp_vfid *vfid; + int err; + + vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); + if (!vfid) { + vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create bridge vFID\n"); + return PTR_ERR(vfid); + } + } + + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + if (err) { + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_port_flood_set; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); + if (err) { + netdev_err(dev, "Failed to enable learning\n"); + goto err_port_vid_learning_set; + } + + /* We need to invalidate existing {Port, VID} to vFID mapping and + * create a new one for the bridge's vFID. + */ + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", + old_vfid->vfid); + goto err_port_vid_to_fid_invalidate; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", + vfid->vfid); + goto err_port_vid_to_fid_validate; + } + + /* Switch between the vFIDs and destroy the old one if needed. 
*/ + vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = vfid; + old_vfid->nr_vports--; + if (!old_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); + + mlxsw_sp_vport->learning = 1; + mlxsw_sp_vport->learning_sync = 1; + mlxsw_sp_vport->uc_flood = 1; + mlxsw_sp_vport->bridged = 1; + + return 0; + +err_port_vid_to_fid_validate: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); +err_port_vid_to_fid_invalidate: + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); +err_port_vid_learning_set: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); +err_port_flood_set: + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + return err; +} + +static bool +mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, + const struct net_device *br_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + return false; + } + + return true; +} + +static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct net_device *upper_dev; + int err; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master || !info->linking) + break; + if (!netif_is_bridge_master(upper_dev)) + return NOTIFY_BAD; + /* We can't have multiple VLAN interfaces configured on + * the same port and being members in the same bridge. + */ + if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, + upper_dev)) + return NOTIFY_BAD; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master) + break; + if (info->linking) { + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return NOTIFY_BAD; + } + err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } + } else { + /* We ignore bridge's unlinking notifications if vPort + * is gone, since we already left the bridge when the + * VLAN device was unlinked from the real device. 
+ */ + if (!mlxsw_sp_vport) + return NOTIFY_DONE; + err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, + vid); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, + unsigned long event, void *ptr) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr, + vid); + else if (netif_is_lag_master(real_dev)) + return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, + vid); + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (mlxsw_sp_port_dev_check(dev)) + return mlxsw_sp_netdevice_port_event(dev, event, ptr); + + if (netif_is_lag_master(dev)) + return mlxsw_sp_netdevice_lag_event(dev, event, ptr); + + if (is_vlan_dev(dev)) + return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + + return NOTIFY_DONE; +} + static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4365c8bccc6d..a23dc610d259 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -41,16 +41,73 @@ #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/if_vlan.h> +#include <linux/list.h> #include <net/switchdev.h> +#include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID +#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ +#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) + +#define MLXSW_SP_LAG_MAX 64 +#define MLXSW_SP_PORT_PER_LAG_MAX 16 + +#define MLXSW_SP_MID_MAX 7000 struct mlxsw_sp_port; +struct mlxsw_sp_upper { + struct net_device *dev; + unsigned int ref_count; +}; + +struct mlxsw_sp_vfid { + struct list_head list; + u16 nr_vports; + u16 vfid; /* Starting at 0 */ + struct net_device *br_dev; + u16 vid; +}; + +struct mlxsw_sp_mid { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 mid; + unsigned int ref_count; +}; + +static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid) +{ + return MLXSW_SP_VFID_BASE + vfid; +} + +static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) +{ + return fid - MLXSW_SP_VFID_BASE; +} + +static inline bool mlxsw_sp_fid_is_vfid(u16 fid) +{ + return fid >= MLXSW_SP_VFID_BASE; +} + struct mlxsw_sp { - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)]; + } port_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; + } br_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + 
} br_mids; unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -63,12 +120,17 @@ struct mlxsw_sp { } fdb_notify; #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct { - struct net_device *dev; - unsigned int ref_count; - } master_bridge; + struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */ + struct mlxsw_sp_upper master_bridge; + struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; }; +static inline struct mlxsw_sp_upper * +mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + return &mlxsw_sp->lags[lag_id]; +} + struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; @@ -87,15 +149,87 @@ struct mlxsw_sp_port { u8 learning:1, learning_sync:1, uc_flood:1, - bridged:1; + bridged:1, + lagged:1; u16 pvid; + u16 lag_id; + struct { + struct list_head list; + struct mlxsw_sp_vfid *vfid; + u16 vid; + } vport; /* 802.1Q bridge VLANs */ - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long *active_vlans; + unsigned long *untagged_vlans; /* VLAN interfaces */ - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; - u16 nr_vfids; + struct list_head vports_list; }; +static inline struct mlxsw_sp_port * +mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + + local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, + lag_id, port_index); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL; +} + +static inline bool +mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port->vport.vfid; +} + +static inline struct net_device * +mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->br_dev; +} + +static inline u16 +mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vid; +} + +static inline u16 +mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->vfid; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid) + return mlxsw_sp_vport; + } + + return NULL; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid) + return mlxsw_sp_vport; + } + + return NULL; +} + enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BM, @@ -118,5 +252,7 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 617fb22b5d81..ffe894e6d287 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -51,12 +51,50 @@ #include 
"core.h" #include "reg.h" +static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) +{ + u16 fid = vid; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + fid = mlxsw_sp_vfid_to_fid(vfid); + } + + if (!fid) + fid = mlxsw_sp_port->pvid; + + return fid; +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_orig_get(struct net_device *dev, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid; + + if (!is_vlan_dev(dev)) + return mlxsw_sp_port; + + vid = vlan_dev_vlan_id(dev); + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + WARN_ON(!mlxsw_sp_vport); + + return mlxsw_sp_vport; +} + static int mlxsw_sp_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); @@ -105,8 +143,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!spms_pl) return -ENOMEM; mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } else { + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); kfree(spms_pl); @@ -124,22 +168,38 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } +static bool mlxsw_sp_vfid_is_vport_br(u16 vfid) +{ + return vfid >= MLXSW_SP_VFID_PORT_MAX; +} + static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid_begin, u16 fid_end, bool set, + u16 idx_begin, u16 idx_end, bool set, bool only_uc) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 range = fid_end - fid_begin + 1; + u16 local_port = mlxsw_sp_port->local_port; + enum mlxsw_flood_table_type table_type; + u16 range = idx_end - idx_begin + 1; char *sftr_pl; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; + if (mlxsw_sp_vfid_is_vport_br(idx_begin)) + local_port = mlxsw_sp_port->local_port; + else + local_port = MLXSW_PORT_CPU_PORT; + } else { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + } + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, + table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto buffer_out; @@ -150,9 +210,8 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, if (only_uc) goto buffer_out; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, + table_type, range, local_port, set); err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: @@ -167,6 +226,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, last_visited_vid; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, + set, true); + } + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, true); @@ -185,6 +251,16 @@ err_port_flood_set: return err; } +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc) +{ + /* In case of vFIDs, index into the flooding table is relative to + * the start of the vFIDs range. + */ + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, + only_uc); +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, unsigned long brport_flags) @@ -193,6 +269,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, bool set; int err; + if (!mlxsw_sp_port->bridged) + return -EINVAL; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -237,6 +316,22 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } +static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool vlan_enabled) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + /* SWITCHDEV_TRANS_PREPARE phase */ + if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) { + netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); + return -EINVAL; + } + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -244,6 +339,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, @@ -257,6 +356,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.vlan_filtering); + break; default: err = -EOPNOTSUPP; break; @@ -304,7 +408,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (mlxsw_sp_port->nr_vfids) + if (!list_empty(&mlxsw_sp_port->vports_list)) mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; else mt = MLXSW_REG_SVFA_MT_VID_TO_FID; @@ -316,7 +420,7 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (!mlxsw_sp_port->nr_vfids) + if (list_empty(&mlxsw_sp_port->vports_list)) return 0; mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; @@ -342,14 +446,35 @@ err_port_add_vid: return err; } +static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, bool is_member, + bool untagged) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, + is_member, untagged); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool flag_pvid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; + u16 vid, last_visited_vid, old_pvid; enum mlxsw_reg_svfa_mt mt; - u16 vid, vid_e; int err; /* In case this is invoked with BRIDGE_FLAGS_SELF and port is @@ -377,15 +502,18 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) { netdev_err(dev, "Failed to create FID=VID=%d mapping\n", vid); - return err; + goto err_port_vid_to_fid_set; } } + } - /* Set FID mapping according to port's mode */ + /* Set FID mapping according to port's mode */ + for (vid = vid_begin; vid <= vid_end; vid++) { err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); if (err) { netdev_err(dev, "Failed to map FID=%d", vid); - return err; + last_visited_vid = --vid; + goto err_port_fid_map; } } @@ -393,83 +521,133 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, true, false); if (err) { netdev_err(dev, "Failed to configure flooding\n"); - return err; + goto err_port_flood_set; } - for (vid = vid_begin; vid <= vid_end; - vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { - vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), - vid_end); - - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true, - flag_untagged); - if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n", - vid, vid_e); - return err; - } + err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, + true, flag_untagged); + if (err) { + netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin, + vid_end); + goto err_port_vlans_set; } - vid = vid_begin; - if (flag_pvid && mlxsw_sp_port->pvid != vid) { - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + old_pvid = mlxsw_sp_port->pvid; + if (flag_pvid && old_pvid != vid_begin) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin); if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n", - vid); - return err; + netdev_err(dev, "Unable to add PVID %d\n", vid_begin); + goto err_port_pvid_set; } - mlxsw_sp_port->pvid = vid; + mlxsw_sp_port->pvid = vid_begin; } /* Changing activity bits only if HW operation succeded */ - for (vid = vid_begin; vid <= vid_end; vid++) + for (vid = vid_begin; vid <= vid_end; vid++) { set_bit(vid, mlxsw_sp_port->active_vlans); + if (flag_untagged) + set_bit(vid, mlxsw_sp_port->untagged_vlans); + else + clear_bit(vid, mlxsw_sp_port->untagged_vlans); + } - return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, - mlxsw_sp_port->stp_state); + /* STP state change must be done after we set active VLANs */ + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, + mlxsw_sp_port->stp_state); + if (err) { + netdev_err(dev, "Failed to set STP state\n"); + goto err_port_stp_state_set; + } + + return 0; + +err_port_vid_to_fid_set: + mlxsw_sp_fid_destroy(mlxsw_sp, vid); + return err; + +err_port_stp_state_set: + for (vid = vid_begin; vid <= vid_end; vid++) + clear_bit(vid, mlxsw_sp_port->active_vlans); + if (old_pvid != mlxsw_sp_port->pvid) + mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); +err_port_pvid_set: + __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, + false); +err_port_vlans_set: + __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 
false, + false); +err_port_flood_set: + last_visited_vid = vid_end; +err_port_fid_map: + for (vid = last_visited_vid; vid >= vid_begin; vid--) + mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + return err; } static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; if (switchdev_trans_ph_prepare(trans)) return 0; return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, - untagged_flag, pvid_flag); + flag_untagged, flag_pvid); +} + +static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) +{ + return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; +} + +static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) +{ + return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT : + MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, - const char *mac, u16 vid, bool adding, - bool dynamic) +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + bool dynamic) { - enum mlxsw_reg_sfd_rec_policy policy; - enum mlxsw_reg_sfd_op op; char *sfd_pl; int err; - if (!vid) - vid = mlxsw_sp_port->pvid; + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + + return err; +} + +static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, + const char *mac, u16 fid, u16 lag_vid, + bool adding, bool dynamic) +{ + char *sfd_pl; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; - policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; - op = adding ? 
MLXSW_REG_SFD_OP_WRITE_EDIT : - MLXSW_REG_SFD_OP_WRITE_REMOVE; - mlxsw_reg_sfd_pack(sfd_pl, op, 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, - mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, - mlxsw_sp_port->local_port); - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), - sfd_pl); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + lag_vid, lag_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; @@ -480,11 +658,162 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); + u16 lag_vid = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + + if (!mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + fdb->addr, fid, true, false); + else + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->lag_id, + fdb->addr, fid, lag_vid, + true, false); +} + +static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, + u16 fid, u16 mid, bool adding) +{ + char *sfd_pl; + int err; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, mid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, + bool add, bool clear_all_ports) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err, i; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); + if (clear_all_ports) { + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + + list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { + if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) + return mid; + } + return NULL; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + u16 mid_idx; + + mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped, + MLXSW_SP_MID_MAX); + if (mid_idx == MLXSW_SP_MID_MAX) + return NULL; + + mid = kzalloc(sizeof(*mid), GFP_KERNEL); + if (!mid) + return NULL; + + set_bit(mid_idx, mlxsw_sp->br_mids.mapped); + ether_addr_copy(mid->addr, addr); + mid->vid = vid; + mid->mid = mid_idx; + mid->ref_count = 0; + list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); + + return mid; +} + +static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + if (--mid->ref_count == 0) { + list_del(&mid->list); + clear_bit(mid->mid, mlxsw_sp->br_mids.mapped); + kfree(mid); + return 1; + } + return 0; +} + +static int mlxsw_sp_port_mdb_add(struct 
mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + int err = 0; + if (switchdev_trans_ph_prepare(trans)) return 0; - return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, - true, false); + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to allocate MC group\n"); + return -ENOMEM; + } + } + mid->ref_count++; + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, + mid->ref_count == 1); + if (err) { + netdev_err(dev, "Unable to set SMID\n"); + goto err_out; + } + + if (mid->ref_count == 1) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid, + true); + if (err) { + netdev_err(dev, "Unable to set MC SFD\n"); + goto err_out; + } + } + + return 0; + +err_out: + __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); + return err; } static int mlxsw_sp_port_obj_add(struct net_device *dev, @@ -494,8 +823,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) + return 0; + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); @@ -505,6 +841,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_FDB(obj), trans); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj), + trans); + break; default: err = -EOPNOTSUPP; break; @@ -532,7 +873,7 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool init) { struct net_device *dev = mlxsw_sp_port->dev; - u16 vid, vid_e; + u16 vid, pvid; int err; /* In case this is invoked with BRIDGE_FLAGS_SELF and port is @@ -542,30 +883,23 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, if (!init && !mlxsw_sp_port->bridged) return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); - for (vid = vid_begin; vid <= vid_end; - vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { - vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), - vid_end); - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false, - false); - if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n", - vid, vid_e); - return err; - } + err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, + false, false); + if (err) { + netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin, + vid_end); + return err; } - if ((mlxsw_sp_port->pvid >= vid_begin) && - (mlxsw_sp_port->pvid <= vid_end)) { + pvid = mlxsw_sp_port->pvid; + if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { /* Default VLAN is always 1 */ - mlxsw_sp_port->pvid = 1; - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, - mlxsw_sp_port->pvid); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); if (err) { - netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n", - vid); + netdev_err(dev, "Unable to del PVID %d\n", pvid); return err; } + mlxsw_sp_port->pvid = 1; } if (init) @@ -606,8 +940,54 @@ static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port 
*mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) { - return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, - false, false); + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); + u16 lag_vid = 0; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + + if (!mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + fdb->addr, fid, + false, false); + else + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->lag_id, + fdb->addr, fid, lag_vid, + false, false); +} + +static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_mid *mid; + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + u16 mid_idx; + int err = 0; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + if (!mid) { + netdev_err(dev, "Unable to remove port from MC DB\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + + mid_idx = mid->mid; + if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx, + false); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); + } + + return err; } static int mlxsw_sp_port_obj_del(struct net_device *dev, @@ -616,8 +996,15 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) + return 0; + err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; @@ -625,6 +1012,9 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj)); break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_MDB(obj)); default: err = -EOPNOTSUPP; break; @@ -633,14 +1023,31 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, return err; } +static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, + u16 lag_id) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + int i; + + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (mlxsw_sp_port) + return mlxsw_sp_port; + } + return NULL; +} + static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t *cb) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u16 vport_vid = 0, vport_fid = 0; char *sfd_pl; char mac[ETH_ALEN]; - u16 vid; + u16 fid; u8 local_port; + u16 lag_id; u8 num_rec; int stored_err = 0; int i; @@ -650,11 +1057,19 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; + mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 tmp; + + tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + vport_fid = mlxsw_sp_vfid_to_fid(tmp); + vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); do { 
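		/* Each iteration requests up to MLXSW_REG_SFD_REC_MAX_COUNT
		 * records from the device; a completely full response means
		 * more entries may remain, hence the
		 * while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT) condition
		 * that ends the dump below.
		 */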
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); - err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(sfd), sfd_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) goto out; @@ -669,21 +1084,46 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, for (i = 0; i < num_rec; i++) { switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { case MLXSW_REG_SFD_REC_TYPE_UNICAST: - mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid, + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, &local_port); if (local_port == mlxsw_sp_port->local_port) { + if (vport_fid && vport_fid != fid) + continue; + else if (vport_fid) + fdb->vid = vport_vid; + else + fdb->vid = fid; + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + err = cb(&fdb->obj); + if (err) + stored_err = err; + } + break; + case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: + mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, + mac, &fid, &lag_id); + if (mlxsw_sp_port == + mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { + if (vport_fid && vport_fid != fid) + continue; + else if (vport_fid) + fdb->vid = vport_vid; + else + fdb->vid = fid; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; - fdb->vid = vid; err = cb(&fdb->obj); if (err) stored_err = err; } + break; } } } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); out: + mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); kfree(sfd_pl); return stored_err ? stored_err : err; } @@ -695,10 +1135,19 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid; int err = 0; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vlan->flags = 0; + vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + return cb(&vlan->obj); + } + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { vlan->flags = 0; if (vid == mlxsw_sp_port->pvid) vlan->flags |= BRIDGE_VLAN_INFO_PVID; + if (test_bit(vid, mlxsw_sp_port->untagged_vlans)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; vlan->vid_begin = vid; vlan->vid_end = vid; err = cb(&vlan->obj); @@ -715,6 +1164,10 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, @@ -740,6 +1193,21 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; +static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, + bool adding, char *mac, u16 vid, + struct net_device *dev) +{ + struct switchdev_notifier_fdb_info info; + unsigned long notifier_type; + + if (learning && learning_sync) { + info.addr = mac; + info.vid = vid; + notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; + call_switchdev_notifiers(notifier_type, dev, &info.info); + } +} + static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, char *sfn_pl, int rec_index, bool adding) @@ -747,34 +1215,119 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port *mlxsw_sp_port; char mac[ETH_ALEN]; u8 local_port; - u16 vid; + u16 vid, fid; + bool do_notification = true; int err; - mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port); + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); - return; + goto just_remove; + } + + if (mlxsw_sp_fid_is_vfid(fid)) { + u16 vfid = mlxsw_sp_fid_to_vfid(fid); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, + vfid); + if (!mlxsw_sp_vport) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); + goto just_remove; + } + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + /* Override the physical port with the vPort. */ + mlxsw_sp_port = mlxsw_sp_vport; + } else { + vid = fid; } - err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, - adding && mlxsw_sp_port->learning, true); + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: + err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, + adding, true); if (err) { if (net_ratelimit()) netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); return; } - if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { - struct switchdev_notifier_fdb_info info; - unsigned long notifier_type; + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, + mlxsw_sp_port->learning_sync, + adding, mac, vid, mlxsw_sp_port->dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; +} - info.addr = mac; - info.vid = vid; - notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; - call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, - &info.info); +static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index, + bool adding) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char mac[ETH_ALEN]; + u16 lag_vid = 0; + u16 lag_id; + u16 vid, fid; + bool do_notification = true; + int err; + + mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id); + mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); + if (!mlxsw_sp_port) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n"); + goto just_remove; } + + if (mlxsw_sp_fid_is_vfid(fid)) { + u16 vfid = mlxsw_sp_fid_to_vfid(fid); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, + vfid); + if (!mlxsw_sp_vport) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); + goto just_remove; + } + + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + lag_vid = vid; + /* Override the physical port with the vPort. 
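+		 * The FDB operation itself is still keyed on the LAG ID and the
+		 * FID; the vPort only supplies the VID (lag_vid) and the
+		 * learning / learning_sync flags used for the bridge
+		 * notification.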
*/ + mlxsw_sp_port = mlxsw_sp_vport; + } else { + vid = fid; + } + + adding = adding && mlxsw_sp_port->learning; + +do_fdb_op: + err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, + adding, true); + if (err) { + if (net_ratelimit()) + netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); + return; + } + + if (!do_notification) + return; + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, + mlxsw_sp_port->learning_sync, + adding, mac, vid, + mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev); + return; + +just_remove: + adding = false; + do_notification = false; + goto do_fdb_op; } static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, @@ -789,6 +1342,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, rec_index, false); break; + case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG: + mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, + rec_index, true); + break; + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG: + mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, + rec_index, false); + break; } } @@ -812,6 +1373,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); + mutex_lock(&mlxsw_sp->fdb_lock); do { mlxsw_reg_sfn_pack(sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); @@ -824,6 +1386,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); } while (num_rec); + mutex_unlock(&mlxsw_sp->fdb_lock); kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -838,6 +1401,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); return err; } + mutex_init(&mlxsw_sp->fdb_lock); INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -877,7 +1441,8 @@ int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) * with VID 1. 
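	 * Valid VIDs are 0..VLAN_N_VID - 1 (0..4095), hence the flush below
	 * runs up to VLAN_N_VID - 1 rather than VLAN_N_VID.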
*/ mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, + true); if (err) { netdev_err(dev, "Unable to init VLANs\n"); return err; diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 2056b719c262..7df318346b05 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -600,22 +600,11 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv) static int encx24j600_hw_init(struct encx24j600_priv *priv) { - struct net_device *dev = priv->ndev; int ret = 0; - u16 eidled; u16 macon2; priv->hw_enabled = false; - eidled = encx24j600_read_reg(priv, EIDLED); - if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) { - ret = -EINVAL; - goto err_out; - } - - netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n", - (eidled & REVID_MASK) >> REVID_SHIFT); - /* PHY Leds: link status, * LEDA: Link State + collision events * LEDB: Link State + transmit/receive events @@ -655,7 +644,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv) if (netif_msg_hw(priv)) encx24j600_dump_config(priv, "Hw is initialized"); -err_out: return ret; } @@ -1004,6 +992,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) struct net_device *ndev; struct encx24j600_priv *priv; + u16 eidled; ndev = alloc_etherdev(sizeof(struct encx24j600_priv)); @@ -1072,10 +1061,21 @@ static int encx24j600_spi_probe(struct spi_device *spi) goto out_free; } + eidled = encx24j600_read_reg(priv, EIDLED); + if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) { + ret = -EINVAL; + goto out_unregister; + } + + netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n", + (eidled & REVID_MASK) >> REVID_SHIFT); + netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr); return ret; +out_unregister: + unregister_netdev(priv->ndev); out_free: free_netdev(ndev); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 83651ac8ddb9..270c9eeb7ab6 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1488,7 +1488,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) } myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - skb_mark_napi_id(skb, &ss->napi); if (polling) { int hlen; @@ -1506,6 +1505,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) skb->data_len -= hlen; skb->tail += hlen; skb->protocol = eth_type_trans(skb, dev); + skb_mark_napi_id(skb, &ss->napi); netif_receive_skb(skb); } else @@ -3814,7 +3814,6 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) ss->dev = mgp->dev; netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, myri10ge_napi_weight); - napi_hash_add(&ss->napi); } return 0; abort: diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig new file mode 100644 index 000000000000..9508ad782c30 --- /dev/null +++ b/drivers/net/ethernet/netronome/Kconfig @@ -0,0 +1,36 @@ +# +# Netronome device configuration +# + +config NET_VENDOR_NETRONOME + bool "Netronome(R) devices" + default y + ---help--- + If you have a Netronome(R) network (Ethernet) card or device, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Netronome(R) cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_NETRONOME + +config NFP_NETVF + tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver" + depends on PCI && PCI_MSI + depends on VXLAN || VXLAN=n + ---help--- + This driver supports SR-IOV virtual functions of + the Netronome(R) NFP4000/NFP6000 cards working as + a advanced Ethernet NIC. + +config NFP_NET_DEBUG + bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers" + depends on NFP_NET || NFP_NETVF + ---help--- + Enable extra sanity checks and debugfs support in + Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers. + Note: selecting this option may adversely impact + performance. + +endif diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile new file mode 100644 index 000000000000..dcb7b383f634 --- /dev/null +++ b/drivers/net/ethernet/netronome/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Netronome network device drivers +# + +obj-$(CONFIG_NFP_NETVF) += nfp/ diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile new file mode 100644 index 000000000000..68178819ff12 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o + +nfp_netvf-objs := \ + nfp_net_common.o \ + nfp_net_ethtool.o \ + nfp_netvf_main.o + +nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h new file mode 100644 index 000000000000..ab264e1bccd0 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -0,0 +1,748 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net.h + * Declarations for Netronome network device driver. 
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#ifndef _NFP_NET_H_ +#define _NFP_NET_H_ + +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <asm-generic/io-64-nonatomic-hi-lo.h> + +#include "nfp_net_ctrl.h" + +#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) +#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) +#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) +#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) +#define nn_warn_ratelimit(nn, fmt, args...) \ + do { \ + if (unlikely(net_ratelimit())) \ + netdev_warn((nn)->netdev, fmt, ## args); \ + } while (0) + +/* Max time to wait for NFP to respond on updates (in ms) */ +#define NFP_NET_POLL_TIMEOUT 5000 + +/* Bar allocation */ +#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_Q0_BAR 2 +#define NFP_NET_Q1_BAR 4 /* OBSOLETE */ + +/* Max bits in DMA address */ +#define NFP_NET_MAX_DMA_BITS 40 + +/* Default size for MTU and freelist buffer sizes */ +#define NFP_NET_DEFAULT_MTU 1500 +#define NFP_NET_DEFAULT_RX_BUFSZ 2048 + +/* Maximum number of bytes prepended to a packet */ +#define NFP_NET_MAX_PREPEND 64 + +/* Interrupt definitions */ +#define NFP_NET_NON_Q_VECTORS 2 +#define NFP_NET_IRQ_LSC_IDX 0 +#define NFP_NET_IRQ_EXN_IDX 1 + +/* Queue/Ring definitions */ +#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */ +#define NFP_NET_MAX_RX_RINGS 64 /* Max. # of Rx rings per device */ + +#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */ +#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */ +#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */ +#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. # of Rx descs per ring */ + +#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */ +#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */ + +#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */ + +/* Offload definitions */ +#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16)) + +/* Forward declarations */ +struct nfp_net; +struct nfp_net_r_vector; + +/* Convenience macro for writing dma address into RX/TX descriptors */ +#define nfp_desc_set_dma_addr(desc, dma_addr) \ + do { \ + __typeof(desc) __d = (desc); \ + dma_addr_t __addr = (dma_addr); \ + \ + __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \ + __d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \ + } while (0) + +/* TX descriptor format */ + +#define PCIE_DESC_TX_EOP BIT(7) +#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0) +#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0) + +/* Flags in the host TX descriptor */ +#define PCIE_DESC_TX_CSUM BIT(7) +#define PCIE_DESC_TX_IP4_CSUM BIT(6) +#define PCIE_DESC_TX_TCP_CSUM BIT(5) +#define PCIE_DESC_TX_UDP_CSUM BIT(4) +#define PCIE_DESC_TX_VLAN BIT(3) +#define PCIE_DESC_TX_LSO BIT(2) +#define PCIE_DESC_TX_ENCAP BIT(1) +#define PCIE_DESC_TX_O_IP4_CSUM BIT(0) + +struct nfp_net_tx_desc { + union { + struct { + u8 dma_addr_hi; /* High bits of host buf address */ + __le16 dma_len; /* Length to DMA for this desc */ + u8 offset_eop; /* Offset in buf where pkt starts + + * highest bit is eop flag. 
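+					 * (bits 6:0 are the offset, see
+					 * PCIE_DESC_TX_OFFSET_MASK; bit 7 is
+					 * PCIE_DESC_TX_EOP)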
+ */ + __le32 dma_addr_lo; /* Low 32bit of host buf addr */ + + __le16 mss; /* MSS to be used for LSO */ + u8 l4_offset; /* LSO, where the L4 data starts */ + u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + __le16 vlan; /* VLAN tag to add if indicated */ + __le16 data_len; /* Length of frame + meta data */ + } __packed; + __le32 vals[4]; + }; +}; + +/** + * struct nfp_net_tx_buf - software TX buffer descriptor + * @skb: sk_buff associated with this buffer + * @dma_addr: DMA mapping address of the buffer + * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags) + * @pkt_cnt: Number of packets to be produced out of the skb associated + * with this buffer (valid only on the head's buffer). + * Will be 1 for all non-TSO packets. + * @real_len: Number of bytes which to be produced out of the skb (valid only + * on the head's buffer). Equal to skb->len for non-TSO packets. + */ +struct nfp_net_tx_buf { + struct sk_buff *skb; + dma_addr_t dma_addr; + short int fidx; + u16 pkt_cnt; + u32 real_len; +}; + +/** + * struct nfp_net_tx_ring - TX ring structure + * @r_vec: Back pointer to ring vector structure + * @idx: Ring index from Linux's perspective + * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue + * @qcp_q: Pointer to base of the QCP TX queue + * @cnt: Size of the queue in number of descriptors + * @wr_p: TX ring write pointer (free running) + * @rd_p: TX ring read pointer (free running) + * @qcp_rd_p: Local copy of QCP TX queue read pointer + * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer + * (used for .xmit_more delayed kick) + * @txbufs: Array of transmitted TX buffers, to free on transmit + * @txds: Virtual address of TX ring in host memory + * @dma: DMA address of the TX ring + * @size: Size, in bytes, of the TX ring (needed to free) + */ +struct nfp_net_tx_ring { + struct nfp_net_r_vector *r_vec; + + u32 idx; + int qcidx; + u8 __iomem *qcp_q; + + u32 cnt; + u32 wr_p; + u32 rd_p; + u32 qcp_rd_p; + + u32 wr_ptr_add; + + struct nfp_net_tx_buf *txbufs; + struct nfp_net_tx_desc *txds; + + dma_addr_t dma; + unsigned int size; +} ____cacheline_aligned; + +/* RX and freelist descriptor format */ + +#define PCIE_DESC_RX_DD BIT(7) +#define PCIE_DESC_RX_META_LEN_MASK GENMASK(6, 0) + +/* Flags in the RX descriptor */ +#define PCIE_DESC_RX_RSS cpu_to_le16(BIT(15)) +#define PCIE_DESC_RX_I_IP4_CSUM cpu_to_le16(BIT(14)) +#define PCIE_DESC_RX_I_IP4_CSUM_OK cpu_to_le16(BIT(13)) +#define PCIE_DESC_RX_I_TCP_CSUM cpu_to_le16(BIT(12)) +#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11)) +#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10)) +#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9)) +#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8)) +#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7)) +#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6)) +#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5)) +#define PCIE_DESC_RX_TCP_CSUM cpu_to_le16(BIT(4)) +#define PCIE_DESC_RX_TCP_CSUM_OK cpu_to_le16(BIT(3)) +#define PCIE_DESC_RX_UDP_CSUM cpu_to_le16(BIT(2)) +#define PCIE_DESC_RX_UDP_CSUM_OK cpu_to_le16(BIT(1)) +#define PCIE_DESC_RX_VLAN cpu_to_le16(BIT(0)) + +#define PCIE_DESC_RX_CSUM_ALL (PCIE_DESC_RX_IP4_CSUM | \ + PCIE_DESC_RX_TCP_CSUM | \ + PCIE_DESC_RX_UDP_CSUM | \ + PCIE_DESC_RX_I_IP4_CSUM | \ + PCIE_DESC_RX_I_TCP_CSUM | \ + PCIE_DESC_RX_I_UDP_CSUM) +#define PCIE_DESC_RX_CSUM_OK_SHIFT 1 +#define __PCIE_DESC_RX_CSUM_ALL le16_to_cpu(PCIE_DESC_RX_CSUM_ALL) +#define __PCIE_DESC_RX_CSUM_ALL_OK (__PCIE_DESC_RX_CSUM_ALL >> \ + PCIE_DESC_RX_CSUM_OK_SHIFT) 
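+/* Illustrative sketch, not part of this patch: each "checksum present" flag
+ * above sits one bit above its "checksum OK" counterpart, which is what
+ * PCIE_DESC_RX_CSUM_OK_SHIFT encodes. A hypothetical helper built on these
+ * masks could look like the following (the function name is made up for
+ * illustration only):
+ */
+static inline bool nfp_rx_ip4_csum_ok_example(__le16 flags)
+{
+	/* HW computed the IPv4 header checksum and reported it good */
+	return (flags & PCIE_DESC_RX_IP4_CSUM) &&
+	       (flags & PCIE_DESC_RX_IP4_CSUM_OK);
+}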
+ +struct nfp_net_rx_desc { + union { + struct { + u8 dma_addr_hi; /* High bits of the buf address */ + __le16 reserved; /* Must be zero */ + u8 meta_len_dd; /* Must be zero */ + + __le32 dma_addr_lo; /* Low bits of the buffer address */ + } __packed fld; + + struct { + __le16 data_len; /* Length of the frame + meta data */ + u8 reserved; + u8 meta_len_dd; /* Length of meta data prepended + + * descriptor done flag. + */ + + __le16 flags; /* RX flags. See @PCIE_DESC_RX_* */ + __le16 vlan; /* VLAN if stripped */ + } __packed rxd; + + __le32 vals[2]; + }; +}; + +struct nfp_net_rx_hash { + __be32 hash_type; + __be32 hash; +}; + +/** + * struct nfp_net_rx_buf - software RX buffer descriptor + * @skb: sk_buff associated with this buffer + * @dma_addr: DMA mapping address of the buffer + */ +struct nfp_net_rx_buf { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +/** + * struct nfp_net_rx_ring - RX ring structure + * @r_vec: Back pointer to ring vector structure + * @cnt: Size of the queue in number of descriptors + * @wr_p: FL/RX ring write pointer (free running) + * @rd_p: FL/RX ring read pointer (free running) + * @idx: Ring index from Linux's perspective + * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist + * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue + * @qcp_fl: Pointer to base of the QCP freelist queue + * @qcp_rx: Pointer to base of the QCP RX queue + * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer + * (used for free list batching) + * @rxbufs: Array of transmitted FL/RX buffers + * @rxds: Virtual address of FL/RX ring in host memory + * @dma: DMA address of the FL/RX ring + * @size: Size, in bytes, of the FL/RX ring (needed to free) + */ +struct nfp_net_rx_ring { + struct nfp_net_r_vector *r_vec; + + u32 cnt; + u32 wr_p; + u32 rd_p; + + u16 idx; + u16 wr_ptr_add; + + int fl_qcidx; + int rx_qcidx; + u8 __iomem *qcp_fl; + u8 __iomem *qcp_rx; + + struct nfp_net_rx_buf *rxbufs; + struct nfp_net_rx_desc *rxds; + + dma_addr_t dma; + unsigned int size; +} ____cacheline_aligned; + +/** + * struct nfp_net_r_vector - Per ring interrupt vector configuration + * @nfp_net: Backpointer to nfp_net structure + * @napi: NAPI structure for this ring vec + * @tx_ring: Pointer to TX ring + * @rx_ring: Pointer to RX ring + * @irq_idx: Index into MSI-X table + * @rx_sync: Seqlock for atomic updates of RX stats + * @rx_pkts: Number of received packets + * @rx_bytes: Number of received bytes + * @rx_drops: Number of packets dropped on RX due to lack of resources + * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK + * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK + * @hw_csum_rx_error: Counter of packets with bad checksums + * @tx_sync: Seqlock for atomic updates of TX stats + * @tx_pkts: Number of Transmitted packets + * @tx_bytes: Number of Transmitted bytes + * @hw_csum_tx: Counter of packets with TX checksum offload requested + * @hw_csum_tx_inner: Counter of inner TX checksum offload requests + * @tx_gather: Counter of packets with Gather DMA + * @tx_lso: Counter of LSO packets sent + * @tx_errors: How many TX errors were encountered + * @tx_busy: How often was TX busy (no space)? + * @handler: Interrupt handler for this ring vector + * @name: Name of the interrupt vector + * @affinity_mask: SMP affinity mask for this vector + * + * This structure ties RX and TX rings to interrupt vectors and a NAPI + * context. 
This currently only supports one RX and TX ring per + * interrupt vector but might be extended in the future to allow + * association of multiple rings per vector. + */ +struct nfp_net_r_vector { + struct nfp_net *nfp_net; + struct napi_struct napi; + + struct nfp_net_tx_ring *tx_ring; + struct nfp_net_rx_ring *rx_ring; + + int irq_idx; + + struct u64_stats_sync rx_sync; + u64 rx_pkts; + u64 rx_bytes; + u64 rx_drops; + u64 hw_csum_rx_ok; + u64 hw_csum_rx_inner_ok; + u64 hw_csum_rx_error; + + struct u64_stats_sync tx_sync; + u64 tx_pkts; + u64 tx_bytes; + u64 hw_csum_tx; + u64 hw_csum_tx_inner; + u64 tx_gather; + u64 tx_lso; + u64 tx_errors; + u64 tx_busy; + + irq_handler_t handler; + char name[IFNAMSIZ + 8]; + cpumask_t affinity_mask; +} ____cacheline_aligned; + +/* Firmware version as it is written in the 32bit value in the BAR */ +struct nfp_net_fw_version { + u8 minor; + u8 major; + u8 class; + u8 resv; +} __packed; + +static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver, + u8 resv, u8 class, u8 major, u8 minor) +{ + return fw_ver->resv == resv && + fw_ver->class == class && + fw_ver->major == major && + fw_ver->minor == minor; +} + +/** + * struct nfp_net - NFP network device structure + * @pdev: Backpointer to PCI device + * @netdev: Backpointer to net_device structure + * @nfp_fallback: Is the driver used in fallback mode? + * @is_vf: Is the driver attached to a VF? + * @is_nfp3200: Is the driver for a NFP-3200 card? + * @fw_loaded: Is the firmware loaded? + * @ctrl: Local copy of the control register/word. + * @fl_bufsz: Currently configured size of the freelist buffers + * @rx_offset: Offset in the RX buffers where packet data starts + * @cpp: Pointer to the CPP handle + * @nfp_dev_cpp: Pointer to the NFP Device handle + * @ctrl_area: Pointer to the CPP area for the control BAR + * @tx_area: Pointer to the CPP area for the TX queues + * @rx_area: Pointer to the CPP area for the FL/RX queues + * @fw_ver: Firmware version + * @cap: Capabilities advertised by the Firmware + * @max_mtu: Maximum support MTU advertised by the Firmware + * @rss_cfg: RSS configuration + * @rss_key: RSS secret key + * @rss_itbl: RSS indirection table + * @max_tx_rings: Maximum number of TX rings supported by the Firmware + * @max_rx_rings: Maximum number of RX rings supported by the Firmware + * @num_tx_rings: Currently configured number of TX rings + * @num_rx_rings: Currently configured number of RX rings + * @txd_cnt: Size of the TX ring in number of descriptors + * @rxd_cnt: Size of the RX ring in number of descriptors + * @tx_rings: Array of pre-allocated TX ring structures + * @rx_rings: Array of pre-allocated RX ring structures + * @num_irqs: Number of allocated interrupt vectors + * @num_r_vecs: Number of used ring vectors + * @r_vecs: Pre-allocated array of ring vectors + * @irq_entries: Pre-allocated array of MSI-X entries + * @lsc_handler: Handler for Link State Change interrupt + * @lsc_name: Name for Link State Change interrupt + * @exn_handler: Handler for Exception interrupt + * @exn_name: Name for Exception interrupt + * @shared_handler: Handler for shared interrupts + * @shared_name: Name for shared interrupt + * @me_freq_mhz: ME clock_freq (MHz) + * @reconfig_lock: Protects HW reconfiguration request regs/machinery + * @link_up: Is the link up? 
+ * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading + * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter + * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter + * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter + * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter + * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW + * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts + * @qcp_cfg: Pointer to QCP queue used for configuration notification + * @ctrl_bar: Pointer to mapped control BAR + * @tx_bar: Pointer to mapped TX queues + * @rx_bar: Pointer to mapped FL/RX queues + * @debugfs_dir: Device directory in debugfs + */ +struct nfp_net { + struct pci_dev *pdev; + struct net_device *netdev; + + unsigned nfp_fallback:1; + unsigned is_vf:1; + unsigned is_nfp3200:1; + unsigned fw_loaded:1; + + u32 ctrl; + u32 fl_bufsz; + + u32 rx_offset; + +#ifdef CONFIG_PCI_IOV + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; +#endif + + struct nfp_cpp *cpp; + struct platform_device *nfp_dev_cpp; + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *tx_area; + struct nfp_cpp_area *rx_area; + + struct nfp_net_fw_version fw_ver; + u32 cap; + u32 max_mtu; + + u32 rss_cfg; + u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; + u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; + + int max_tx_rings; + int max_rx_rings; + + int num_tx_rings; + int num_rx_rings; + + int stride_tx; + int stride_rx; + + int txd_cnt; + int rxd_cnt; + + struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS]; + struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS]; + + u8 num_irqs; + u8 num_r_vecs; + struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS]; + struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + + NFP_NET_MAX_TX_RINGS]; + + irq_handler_t lsc_handler; + char lsc_name[IFNAMSIZ + 8]; + + irq_handler_t exn_handler; + char exn_name[IFNAMSIZ + 8]; + + irq_handler_t shared_handler; + char shared_name[IFNAMSIZ + 8]; + + u32 me_freq_mhz; + + bool link_up; + spinlock_t link_status_lock; + + spinlock_t reconfig_lock; + + u32 rx_coalesce_usecs; + u32 rx_coalesce_max_frames; + u32 tx_coalesce_usecs; + u32 tx_coalesce_max_frames; + + __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS]; + u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS]; + + u8 __iomem *qcp_cfg; + + u8 __iomem *ctrl_bar; + u8 __iomem *q_bar; + u8 __iomem *tx_bar; + u8 __iomem *rx_bar; + + struct dentry *debugfs_dir; +}; + +/* Functions to read/write from/to a BAR + * Performs any endian conversion necessary. + */ +static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) +{ + writeb(val, nn->ctrl_bar + off); +} + +/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */ + +static inline u32 nn_readl(struct nfp_net *nn, int off) +{ + return readl(nn->ctrl_bar + off); +} + +static inline void nn_writel(struct nfp_net *nn, int off, u32 val) +{ + writel(val, nn->ctrl_bar + off); +} + +static inline u64 nn_readq(struct nfp_net *nn, int off) +{ + return readq(nn->ctrl_bar + off); +} + +static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) +{ + writeq(val, nn->ctrl_bar + off); +} + +/* Flush posted PCI writes by reading something without side effects */ +static inline void nn_pci_flush(struct nfp_net *nn) +{ + nn_readl(nn, NFP_NET_CFG_VERSION); +} + +/* Queue Controller Peripheral access functions and definitions. 
+ * + * Some of the BARs of the NFP are mapped to portions of the Queue + * Controller Peripheral (QCP) address space on the NFP. A QCP queue + * has a read and a write pointer (as well as a size and flags, + * indicating overflow etc). The QCP offers a number of different + * operation on queue pointers, but here we only offer function to + * either add to a pointer or to read the pointer value. + */ +#define NFP_QCP_QUEUE_ADDR_SZ 0x800 +#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ) +#define NFP_QCP_QUEUE_ADD_RPTR 0x0000 +#define NFP_QCP_QUEUE_ADD_WPTR 0x0004 +#define NFP_QCP_QUEUE_STS_LO 0x0008 +#define NFP_QCP_QUEUE_STS_LO_READPTR_mask 0x3ffff +#define NFP_QCP_QUEUE_STS_HI 0x000c +#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff + +/* The offset of a QCP queues in the PCIe Target (same on NFP3200 and NFP6000 */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) + +/* nfp_qcp_ptr - Read or Write Pointer of a queue */ +enum nfp_qcp_ptr { + NFP_QCP_READ_PTR = 0, + NFP_QCP_WRITE_PTR +}; + +/* There appear to be an *undocumented* upper limit on the value which + * one can add to a queue and that value is either 0x3f or 0x7f. We + * go with 0x3f as a conservative measure. + */ +#define NFP_QCP_MAX_ADD 0x3f + +static inline void _nfp_qcp_ptr_add(u8 __iomem *q, + enum nfp_qcp_ptr ptr, u32 val) +{ + u32 off; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_ADD_RPTR; + else + off = NFP_QCP_QUEUE_ADD_WPTR; + + while (val > NFP_QCP_MAX_ADD) { + writel(NFP_QCP_MAX_ADD, q + off); + val -= NFP_QCP_MAX_ADD; + } + + writel(val, q + off); +} + +/** + * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue + * + * @q: Base address for queue structure + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. + */ +static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val) +{ + _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val); +} + +/** + * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue + * + * @q: Base address for queue structure + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. + */ +static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val) +{ + _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val); +} + +static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr) +{ + u32 off; + u32 val; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_STS_LO; + else + off = NFP_QCP_QUEUE_STS_HI; + + val = readl(q + off); + + if (ptr == NFP_QCP_READ_PTR) + return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; + else + return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; +} + +/** + * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue + * @q: Base address for queue structure + * + * Return: Value read. + */ +static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q) +{ + return _nfp_qcp_read(q, NFP_QCP_READ_PTR); +} + +/** + * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue + * @q: Base address for queue structure + * + * Return: Value read. 
+ */ +static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) +{ + return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR); +} + +/* Globals */ +extern const char nfp_net_driver_name[]; +extern const char nfp_net_driver_version[]; + +/* Prototypes */ +void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, + void __iomem *ctrl_bar); + +struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, + int max_tx_rings, int max_rx_rings); +void nfp_net_netdev_free(struct nfp_net *nn); +int nfp_net_netdev_init(struct net_device *netdev); +void nfp_net_netdev_clean(struct net_device *netdev); +void nfp_net_set_ethtool_ops(struct net_device *netdev); +void nfp_net_info(struct nfp_net *nn); +int nfp_net_reconfig(struct nfp_net *nn, u32 update); +void nfp_net_rss_write_itbl(struct nfp_net *nn); +void nfp_net_rss_write_key(struct nfp_net *nn); +void nfp_net_coalesce_write_cfg(struct nfp_net *nn); +int nfp_net_irqs_alloc(struct nfp_net *nn); +void nfp_net_irqs_disable(struct nfp_net *nn); + +#ifdef CONFIG_NFP_NET_DEBUG +void nfp_net_debugfs_create(void); +void nfp_net_debugfs_destroy(void); +void nfp_net_debugfs_adapter_add(struct nfp_net *nn); +void nfp_net_debugfs_adapter_del(struct nfp_net *nn); +#else +static inline void nfp_net_debugfs_create(void) +{ +} + +static inline void nfp_net_debugfs_destroy(void) +{ +} + +static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +{ +} + +static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +{ +} +#endif /* CONFIG_NFP_NET_DEBUG */ + +#endif /* _NFP_NET_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c new file mode 100644 index 000000000000..43c618bafdb6 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -0,0 +1,2435 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * nfp_net_common.c + * Netronome network device driver: Common functions between PF and VF + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + * Chris Telfer <chris.telfer@netronome.com> + */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/msi.h> +#include <linux/ethtool.h> +#include <linux/log2.h> +#include <linux/if_vlan.h> +#include <linux/random.h> + +#include <linux/ktime.h> + +#include <net/vxlan.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +/** + * nfp_net_get_fw_version() - Read and parse the FW version + * @fw_ver: Output fw_version structure to read to + * @ctrl_bar: Mapped address of the control BAR + */ +void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, + void __iomem *ctrl_bar) +{ + u32 reg; + + reg = readl(ctrl_bar + NFP_NET_CFG_VERSION); + put_unaligned_le32(reg, fw_ver); +} + +/** + * nfp_net_reconfig() - Reconfigure the firmware + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. The + * poll until the firmware has acknowledged the update by zeroing the + * update word. + * + * Return: Negative errno on error, 0 on success + */ +int nfp_net_reconfig(struct nfp_net *nn, u32 update) +{ + int cnt, ret = 0; + u32 new; + + spin_lock_bh(&nn->reconfig_lock); + + nn_writel(nn, NFP_NET_CFG_UPDATE, update); + /* ensure update is written before pinging HW */ + nn_pci_flush(nn); + nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); + + /* Poll update field, waiting for NFP to ack the config */ + for (cnt = 0; ; cnt++) { + new = nn_readl(nn, NFP_NET_CFG_UPDATE); + if (new == 0) + break; + if (new & NFP_NET_CFG_UPDATE_ERR) { + nn_err(nn, "Reconfig error: 0x%08x\n", new); + ret = -EIO; + break; + } else if (cnt >= NFP_NET_POLL_TIMEOUT) { + nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n", + update, cnt); + ret = -EIO; + break; + } + mdelay(1); + } + + spin_unlock_bh(&nn->reconfig_lock); + return ret; +} + +/* Interrupt configuration and handling + */ + +/** + * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking + * @nn: NFP Network structure + * @entry_nr: MSI-X table entry + * + * Clear the MSI-X table mask bit for the given entry bypassing Linux irq + * handling subsystem. Use *only* to reenable automasked vectors. + */ +static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr) +{ + struct list_head *msi_head = &nn->pdev->dev.msi_list; + struct msi_desc *entry; + u32 off; + + /* All MSI-Xs have the same mask_base */ + entry = list_first_entry(msi_head, struct msi_desc, list); + + off = (PCI_MSIX_ENTRY_SIZE * entry_nr) + + PCI_MSIX_ENTRY_VECTOR_CTRL; + writel(0, entry->mask_base + off); + readl(entry->mask_base); +} + +/** + * nfp_net_irq_unmask() - Unmask automasked interrupt + * @nn: NFP Network structure + * @entry_nr: MSI-X table entry + * + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. 
+ */ +static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) +{ + if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + nfp_net_irq_unmask_msix(nn, entry_nr); + return; + } + + nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED); + nn_pci_flush(nn); +} + +/** + * nfp_net_msix_alloc() - Try to allocate MSI-X irqs + * @nn: NFP Network structure + * @nr_vecs: Number of MSI-X vectors to allocate + * + * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors. + * + * Return: Number of MSI-X vectors obtained or 0 on error. + */ +static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs) +{ + struct pci_dev *pdev = nn->pdev; + int nvecs; + int i; + + for (i = 0; i < nr_vecs; i++) + nn->irq_entries[i].entry = i; + + nvecs = pci_enable_msix_range(pdev, nn->irq_entries, + NFP_NET_NON_Q_VECTORS + 1, nr_vecs); + if (nvecs < 0) { + nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n", + NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs); + return 0; + } + + return nvecs; +} + +/** + * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want + * @nn: NFP Network structure + * + * We want a vector per CPU (or ring), whatever is smaller plus + * NFP_NET_NON_Q_VECTORS for LSC etc. + * + * Return: Number of interrupts wanted + */ +static int nfp_net_irqs_wanted(struct nfp_net *nn) +{ + int ncpus; + int vecs; + + ncpus = num_online_cpus(); + + vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings); + vecs = min_t(int, vecs, ncpus); + + return vecs + NFP_NET_NON_Q_VECTORS; +} + +/** + * nfp_net_irqs_alloc() - allocates MSI-X irqs + * @nn: NFP Network structure + * + * Return: Number of irqs obtained or 0 on error. + */ +int nfp_net_irqs_alloc(struct nfp_net *nn) +{ + int wanted_irqs; + + wanted_irqs = nfp_net_irqs_wanted(nn); + + nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs); + if (nn->num_irqs == 0) { + nn_err(nn, "Failed to allocate MSI-X IRQs\n"); + return 0; + } + + nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS; + + if (nn->num_irqs < wanted_irqs) + nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n", + wanted_irqs, nn->num_irqs); + + return nn->num_irqs; +} + +/** + * nfp_net_irqs_disable() - Disable interrupts + * @nn: NFP Network structure + * + * Undoes what @nfp_net_irqs_alloc() does. + */ +void nfp_net_irqs_disable(struct nfp_net *nn) +{ + pci_disable_msix(nn->pdev); +} + +/** + * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings. + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) +{ + struct nfp_net_r_vector *r_vec = data; + + napi_schedule_irqoff(&r_vec->napi); + + /* The FW auto-masks any interrupt, either via the MASK bit in + * the MSI-X table or via the per entry ICR field. So there + * is no need to disable interrupts here. 
+ */ + return IRQ_HANDLED; +} + +/** + * nfp_net_read_link_status() - Reread link status from control BAR + * @nn: NFP Network structure + */ +static void nfp_net_read_link_status(struct nfp_net *nn) +{ + unsigned long flags; + bool link_up; + u32 sts; + + spin_lock_irqsave(&nn->link_status_lock, flags); + + sts = nn_readl(nn, NFP_NET_CFG_STS); + link_up = !!(sts & NFP_NET_CFG_STS_LINK); + + if (nn->link_up == link_up) + goto out; + + nn->link_up = link_up; + + if (nn->link_up) { + netif_carrier_on(nn->netdev); + netdev_info(nn->netdev, "NIC Link is Up\n"); + } else { + netif_carrier_off(nn->netdev); + netdev_info(nn->netdev, "NIC Link is Down\n"); + } +out: + spin_unlock_irqrestore(&nn->link_status_lock, flags); +} + +/** + * nfp_net_irq_lsc() - Interrupt service routine for link state changes + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_lsc(int irq, void *data) +{ + struct nfp_net *nn = data; + + nfp_net_read_link_status(nn); + + nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX); + + return IRQ_HANDLED; +} + +/** + * nfp_net_irq_exn() - Interrupt service routine for exceptions + * @irq: Interrupt + * @data: Opaque data structure + * + * Return: Indicate if the interrupt has been handled. + */ +static irqreturn_t nfp_net_irq_exn(int irq, void *data) +{ + struct nfp_net *nn = data; + + nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__); + /* XXX TO BE IMPLEMENTED */ + return IRQ_HANDLED; +} + +/** + * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring + * @tx_ring: TX ring structure + */ +static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + + tx_ring->qcidx = tx_ring->idx * nn->stride_tx; + tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); +} + +/** + * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring + * @rx_ring: RX ring structure + */ +static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + + rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; + rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); + + rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); + rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx); +} + +/** + * nfp_net_irqs_assign() - Assign IRQs and setup rvecs. 
+ * @netdev: netdev structure + */ +static void nfp_net_irqs_assign(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + struct nfp_net_r_vector *r_vec; + int r; + + /* Assumes nn->num_tx_rings == nn->num_rx_rings */ + if (nn->num_tx_rings > nn->num_r_vecs) { + nn_warn(nn, "More rings (%d) than vectors (%d).\n", + nn->num_tx_rings, nn->num_r_vecs); + nn->num_tx_rings = nn->num_r_vecs; + nn->num_rx_rings = nn->num_r_vecs; + } + + nn->lsc_handler = nfp_net_irq_lsc; + nn->exn_handler = nfp_net_irq_exn; + + for (r = 0; r < nn->num_r_vecs; r++) { + r_vec = &nn->r_vecs[r]; + r_vec->nfp_net = nn; + r_vec->handler = nfp_net_irq_rxtx; + r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; + + cpumask_set_cpu(r, &r_vec->affinity_mask); + + r_vec->tx_ring = &nn->tx_rings[r]; + nn->tx_rings[r].idx = r; + nn->tx_rings[r].r_vec = r_vec; + nfp_net_tx_ring_init(r_vec->tx_ring); + + r_vec->rx_ring = &nn->rx_rings[r]; + nn->rx_rings[r].idx = r; + nn->rx_rings[r].r_vec = r_vec; + nfp_net_rx_ring_init(r_vec->rx_ring); + } +} + +/** + * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN) + * @nn: NFP Network structure + * @ctrl_offset: Control BAR offset where IRQ configuration should be written + * @format: printf-style format to construct the interrupt name + * @name: Pointer to allocated space for interrupt name + * @name_sz: Size of space for interrupt name + * @vector_idx: Index of MSI-X vector used for this interrupt + * @handler: IRQ handler to register for this interrupt + */ +static int +nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, + const char *format, char *name, size_t name_sz, + unsigned int vector_idx, irq_handler_t handler) +{ + struct msix_entry *entry; + int err; + + entry = &nn->irq_entries[vector_idx]; + + snprintf(name, name_sz, format, netdev_name(nn->netdev)); + err = request_irq(entry->vector, handler, 0, name, nn); + if (err) { + nn_err(nn, "Failed to request IRQ %d (err=%d).\n", + entry->vector, err); + return err; + } + nn_writeb(nn, ctrl_offset, vector_idx); + + return 0; +} + +/** + * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN) + * @nn: NFP Network structure + * @ctrl_offset: Control BAR offset where IRQ configuration should be written + * @vector_idx: Index of MSI-X vector used for this interrupt + */ +static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, + unsigned int vector_idx) +{ + nn_writeb(nn, ctrl_offset, 0xff); + free_irq(nn->irq_entries[vector_idx].vector, nn); +} + +/* Transmit + * + * One queue controller peripheral queue is used for transmit. The + * driver en-queues packets for transmit by advancing the write + * pointer. The device indicates that packets have transmitted by + * advancing the read pointer. The driver maintains a local copy of + * the read and write pointer in @struct nfp_net_tx_ring. The driver + * keeps @wr_p in sync with the queue controller write pointer and can + * determine how many packets have been transmitted by comparing its + * copy of the read pointer @rd_p with the read pointer maintained by + * the queue controller peripheral. + */ + +/** + * nfp_net_tx_full() - Check if the TX ring is full + * @tx_ring: TX ring to check + * @dcnt: Number of descriptors that need to be enqueued (must be >= 1) + * + * This function checks, based on the *host copy* of read/write + * pointer if a given TX ring is full. The real TX queue may have + * some newly made available slots. + * + * Return: True if the ring is full. 
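+ * (@wr_p and @rd_p are free running, so @wr_p - @rd_p is the number of
+ * descriptors currently in flight; the ring counts as full once @dcnt or
+ * fewer free slots remain.)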
+ */ +static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt) +{ + return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt); +} + +/* Wrappers for deciding when to stop and restart TX queues */ +static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring) +{ + return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4); +} + +static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring) +{ + return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1); +} + +/** + * nfp_net_tx_ring_stop() - stop tx ring + * @nd_q: netdev queue + * @tx_ring: driver tx queue structure + * + * Safely stop TX ring. Remember that while we are running .start_xmit() + * someone else may be cleaning the TX ring completions so we need to be + * extra careful here. + */ +static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, + struct nfp_net_tx_ring *tx_ring) +{ + netif_tx_stop_queue(nd_q); + + /* We can race with the TX completion out of NAPI so recheck */ + smp_mb(); + if (unlikely(nfp_net_tx_ring_should_wake(tx_ring))) + netif_tx_start_queue(nd_q); +} + +/** + * nfp_net_tx_tso() - Set up Tx descriptor for LSO + * @nn: NFP Net device + * @r_vec: per-ring structure + * @txbuf: Pointer to driver soft TX descriptor + * @txd: Pointer to HW TX descriptor + * @skb: Pointer to SKB + * + * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. + * Return error on packet header greater than maximum supported LSO header size. + */ +static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_tx_buf *txbuf, + struct nfp_net_tx_desc *txd, struct sk_buff *skb) +{ + u32 hdrlen; + u16 mss; + + if (!skb_is_gso(skb)) + return; + + if (!skb->encapsulation) + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + else + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + + txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs; + txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1); + + mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK; + txd->l4_offset = hdrlen; + txd->mss = cpu_to_le16(mss); + txd->flags |= PCIE_DESC_TX_LSO; + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_lso++; + u64_stats_update_end(&r_vec->tx_sync); +} + +/** + * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor + * @nn: NFP Net device + * @r_vec: per-ring structure + * @txbuf: Pointer to driver soft TX descriptor + * @txd: Pointer to TX descriptor + * @skb: Pointer to SKB + * + * This function sets the TX checksum flags in the TX descriptor based + * on the configuration and the protocol of the packet to be transmitted. + */ +static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_tx_buf *txbuf, + struct nfp_net_tx_desc *txd, struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 l4_hdr; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) + return; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return; + + txd->flags |= PCIE_DESC_TX_CSUM; + if (skb->encapsulation) + txd->flags |= PCIE_DESC_TX_ENCAP; + + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? 
inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { + txd->flags |= PCIE_DESC_TX_IP4_CSUM; + l4_hdr = iph->protocol; + } else if (ipv6h->version == 6) { + l4_hdr = ipv6h->nexthdr; + } else { + nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", + iph->version); + return; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + txd->flags |= PCIE_DESC_TX_TCP_CSUM; + break; + case IPPROTO_UDP: + txd->flags |= PCIE_DESC_TX_UDP_CSUM; + break; + default: + nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", + l4_hdr); + return; + } + + u64_stats_update_begin(&r_vec->tx_sync); + if (skb->encapsulation) + r_vec->hw_csum_tx_inner += txbuf->pkt_cnt; + else + r_vec->hw_csum_tx += txbuf->pkt_cnt; + u64_stats_update_end(&r_vec->tx_sync); +} + +/** + * nfp_net_tx() - Main transmit entry point + * @skb: SKB to transmit + * @netdev: netdev structure + * + * Return: NETDEV_TX_OK on success. + */ +static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + const struct skb_frag_struct *frag; + struct nfp_net_r_vector *r_vec; + struct nfp_net_tx_desc *txd, txdg; + struct nfp_net_tx_buf *txbuf; + struct nfp_net_tx_ring *tx_ring; + struct netdev_queue *nd_q; + dma_addr_t dma_addr; + unsigned int fsize; + int f, nr_frags; + int wr_idx; + u16 qidx; + + qidx = skb_get_queue_mapping(skb); + tx_ring = &nn->tx_rings[qidx]; + r_vec = tx_ring->r_vec; + nd_q = netdev_get_tx_queue(nn->netdev, qidx); + + nr_frags = skb_shinfo(skb)->nr_frags; + + if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { + nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", + qidx, tx_ring->wr_p, tx_ring->rd_p); + netif_tx_stop_queue(nd_q); + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_busy++; + u64_stats_update_end(&r_vec->tx_sync); + return NETDEV_TX_BUSY; + } + + /* Start with the head skbuf */ + dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + goto err_free; + + wr_idx = tx_ring->wr_p % tx_ring->cnt; + + /* Stash the soft descriptor of the head then initialize it */ + txbuf = &tx_ring->txbufs[wr_idx]; + txbuf->skb = skb; + txbuf->dma_addr = dma_addr; + txbuf->fidx = -1; + txbuf->pkt_cnt = 1; + txbuf->real_len = skb->len; + + /* Build TX descriptor */ + txd = &tx_ring->txds[wr_idx]; + txd->offset_eop = (nr_frags == 0) ? 
PCIE_DESC_TX_EOP : 0; + txd->dma_len = cpu_to_le16(skb_headlen(skb)); + nfp_desc_set_dma_addr(txd, dma_addr); + txd->data_len = cpu_to_le16(skb->len); + + txd->flags = 0; + txd->mss = 0; + txd->l4_offset = 0; + + nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); + + nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); + + if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { + txd->flags |= PCIE_DESC_TX_VLAN; + txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + } + + /* Gather DMA */ + if (nr_frags > 0) { + /* all descs must match except for in addr, length and eop */ + txdg = *txd; + + for (f = 0; f < nr_frags; f++) { + frag = &skb_shinfo(skb)->frags[f]; + fsize = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, + fsize, DMA_TO_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + goto err_unmap; + + wr_idx = (wr_idx + 1) % tx_ring->cnt; + tx_ring->txbufs[wr_idx].skb = skb; + tx_ring->txbufs[wr_idx].dma_addr = dma_addr; + tx_ring->txbufs[wr_idx].fidx = f; + + txd = &tx_ring->txds[wr_idx]; + *txd = txdg; + txd->dma_len = cpu_to_le16(fsize); + nfp_desc_set_dma_addr(txd, dma_addr); + txd->offset_eop = + (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0; + } + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_gather++; + u64_stats_update_end(&r_vec->tx_sync); + } + + netdev_tx_sent_queue(nd_q, txbuf->real_len); + + tx_ring->wr_p += nr_frags + 1; + if (nfp_net_tx_ring_should_stop(tx_ring)) + nfp_net_tx_ring_stop(nd_q, tx_ring); + + tx_ring->wr_ptr_add += nr_frags + 1; + if (!skb->xmit_more || netif_xmit_stopped(nd_q)) { + /* force memory write before we let HW know */ + wmb(); + nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); + tx_ring->wr_ptr_add = 0; + } + + skb_tx_timestamp(skb); + + return NETDEV_TX_OK; + +err_unmap: + --f; + while (f >= 0) { + frag = &skb_shinfo(skb)->frags[f]; + dma_unmap_page(&nn->pdev->dev, + tx_ring->txbufs[wr_idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); + tx_ring->txbufs[wr_idx].skb = NULL; + tx_ring->txbufs[wr_idx].dma_addr = 0; + tx_ring->txbufs[wr_idx].fidx = -2; + wr_idx = wr_idx - 1; + if (wr_idx < 0) + wr_idx += tx_ring->cnt; + } + dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + tx_ring->txbufs[wr_idx].skb = NULL; + tx_ring->txbufs[wr_idx].dma_addr = 0; + tx_ring->txbufs[wr_idx].fidx = -2; +err_free: + nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_errors++; + u64_stats_update_end(&r_vec->tx_sync); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * nfp_net_tx_complete() - Handled completed TX packets + * @tx_ring: TX ring structure + * + * Return: Number of completed TX descriptors + */ +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + const struct skb_frag_struct *frag; + struct netdev_queue *nd_q; + u32 done_pkts = 0, done_bytes = 0; + struct sk_buff *skb; + int todo, nr_frags; + u32 qcp_rd_p; + int fidx; + int idx; + + /* Work out how many descriptors have been transmitted */ + qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); + + if (qcp_rd_p == tx_ring->qcp_rd_p) + return; + + if (qcp_rd_p > tx_ring->qcp_rd_p) + todo = qcp_rd_p - tx_ring->qcp_rd_p; + else + todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p; + + while (todo--) { + idx = tx_ring->rd_p % tx_ring->cnt; + tx_ring->rd_p++; + + skb = tx_ring->txbufs[idx].skb; + if (!skb) + continue; + + nr_frags = 
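The completion path above derives the number of newly completed descriptors from the queue controller read pointer, which counts modulo the ring size, by comparing it with a cached copy. A standalone model of that wrap-aware delta (names and sizes are illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Standalone model: the queue controller read pointer counts modulo the
 * ring size, so the number of newly completed descriptors is the forward
 * distance from the cached copy to the freshly read value.
 */
static uint32_t completed(uint32_t hw_rd_p, uint32_t cached_rd_p, uint32_t cnt)
{
	if (hw_rd_p >= cached_rd_p)
		return hw_rd_p - cached_rd_p;
	return hw_rd_p + cnt - cached_rd_p;	/* pointer wrapped past the end */
}

int main(void)
{
	assert(completed(10, 4, 256) == 6);	/* no wrap */
	assert(completed(3, 250, 256) == 9);	/* wrapped */
	assert(completed(7, 7, 256) == 0);	/* nothing new */
	return 0;
}
```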
skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; + + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&nn->pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + + done_pkts += tx_ring->txbufs[idx].pkt_cnt; + done_bytes += tx_ring->txbufs[idx].real_len; + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&nn->pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); + } + + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; + } + + tx_ring->qcp_rd_p = qcp_rd_p; + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_bytes += done_bytes; + r_vec->tx_pkts += done_pkts; + u64_stats_update_end(&r_vec->tx_sync); + + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); + if (nfp_net_tx_ring_should_wake(tx_ring)) { + /* Make sure TX thread will see updated tx_ring->rd_p */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(nd_q))) + netif_tx_wake_queue(nd_q); + } + + WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, + "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", + tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); +} + +/** + * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring + * @tx_ring: TX ring structure + * + * Assumes that the device is stopped + */ +static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + const struct skb_frag_struct *frag; + struct netdev_queue *nd_q; + struct sk_buff *skb; + int nr_frags; + int fidx; + int idx; + + while (tx_ring->rd_p != tx_ring->wr_p) { + idx = tx_ring->rd_p % tx_ring->cnt; + + skb = tx_ring->txbufs[idx].skb; + if (skb) { + nr_frags = skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; + + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), + DMA_TO_DEVICE); + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), + DMA_TO_DEVICE); + } + + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; + } + + memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx])); + + tx_ring->qcp_rd_p++; + tx_ring->rd_p++; + } + + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + netdev_tx_reset_queue(nd_q); +} + +static void nfp_net_tx_timeout(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + for (i = 0; i < nn->num_tx_rings; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) + continue; + nn_warn(nn, "TX timeout on ring: %d\n", i); + } + nn_warn(nn, "TX watchdog timeout\n"); +} + +/* Receive processing + */ + +/** + * nfp_net_rx_space() - return the number of free slots on the RX ring + * @rx_ring: RX ring structure + * + * Make sure we leave at least one slot free. 
+ * + * Return: True if there is space on the RX ring + */ +static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring) +{ + return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p); +} + +/** + * nfp_net_rx_alloc_one() - Allocate and map skb for RX + * @rx_ring: RX ring structure of the skb + * @dma_addr: Pointer to storage for DMA address (output param) + * + * This function will allcate a new skb, map it for DMA. + * + * Return: allocated skb or NULL on failure. + */ +static struct sk_buff * +nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr) +{ + struct nfp_net *nn = rx_ring->r_vec->nfp_net; + struct sk_buff *skb; + + skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz); + if (!skb) { + nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); + return NULL; + } + + *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, + nn->fl_bufsz, DMA_FROM_DEVICE); + if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { + dev_kfree_skb_any(skb); + nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + return NULL; + } + + return skb; +} + +/** + * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings + * @rx_ring: RX ring structure + * @skb: Skb to put on rings + * @dma_addr: DMA address of skb mapping + */ +static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + unsigned int wr_idx; + + wr_idx = rx_ring->wr_p % rx_ring->cnt; + + /* Stash SKB and DMA address away */ + rx_ring->rxbufs[wr_idx].skb = skb; + rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; + + /* Fill freelist descriptor */ + rx_ring->rxds[wr_idx].fld.reserved = 0; + rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); + + rx_ring->wr_p++; + rx_ring->wr_ptr_add++; + if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) { + /* Update write pointer of the freelist queue. Make + * sure all writes are flushed before telling the hardware. + */ + wmb(); + nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add); + rx_ring->wr_ptr_add = 0; + } +} + +/** + * nfp_net_rx_flush() - Free any buffers currently on the RX ring + * @rx_ring: RX ring to remove buffers from + * + * Assumes that the device is stopped + */ +static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net *nn = rx_ring->r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int idx; + + while (rx_ring->rd_p != rx_ring->wr_p) { + idx = rx_ring->rd_p % rx_ring->cnt; + + if (rx_ring->rxbufs[idx].skb) { + dma_unmap_single(&pdev->dev, + rx_ring->rxbufs[idx].dma_addr, + nn->fl_bufsz, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_ring->rxbufs[idx].skb); + rx_ring->rxbufs[idx].dma_addr = 0; + rx_ring->rxbufs[idx].skb = NULL; + } + + memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx])); + + rx_ring->rd_p++; + } +} + +/** + * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers + * @rx_ring: RX ring to fill + * + * Try to fill as many buffers as possible into freelist. Return + * number of buffers added. + * + * Return: Number of freelist buffers added. 
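The freelist refill path below posts buffers to the software ring immediately but only advances the hardware write pointer in batches (NFP_NET_FL_BATCH) to limit doorbell writes. A userspace sketch of that batching, with an invented batch size:

```c
#include <assert.h>
#include <stdint.h>

#define FL_BATCH 8	/* illustrative batch size, not the driver's value */

struct fl {
	uint32_t wr_p;		/* software write pointer */
	uint32_t wr_ptr_add;	/* buffers not yet announced to HW */
	uint32_t hw_wr_p;	/* what the device has been told so far */
};

/* Post one buffer; only "kick" the device every FL_BATCH buffers. */
static void give_one(struct fl *fl)
{
	fl->wr_p++;
	fl->wr_ptr_add++;
	if (fl->wr_ptr_add >= FL_BATCH) {
		/* in the driver a write barrier precedes the doorbell */
		fl->hw_wr_p += fl->wr_ptr_add;
		fl->wr_ptr_add = 0;
	}
}

int main(void)
{
	struct fl fl = { 0 };

	for (int i = 0; i < 20; i++)
		give_one(&fl);
	assert(fl.wr_p == 20);
	assert(fl.hw_wr_p == 16);	/* two full batches announced */
	assert(fl.wr_ptr_add == 4);	/* remainder waits for the next batch */
	return 0;
}
```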
+ */ +static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring) +{ + struct sk_buff *skb; + dma_addr_t dma_addr; + + while (nfp_net_rx_space(rx_ring)) { + skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr); + if (!skb) { + nfp_net_rx_flush(rx_ring); + return -ENOMEM; + } + nfp_net_rx_give_one(rx_ring, skb, dma_addr); + } + + return 0; +} + +/** + * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors + * @flags: RX descriptor flags field in CPU byte order + */ +static int nfp_net_rx_csum_has_errors(u16 flags) +{ + u16 csum_all_checked, csum_all_ok; + + csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL; + csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK; + + return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT); +} + +/** + * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags + * @nn: NFP Net device + * @r_vec: per-ring structure + * @rxd: Pointer to RX descriptor + * @skb: Pointer to SKB + */ +static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_desc *rxd, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + if (!(nn->netdev->features & NETIF_F_RXCSUM)) + return; + + if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_error++; + u64_stats_update_end(&r_vec->rx_sync); + return; + } + + /* Assume that the firmware will never report inner CSUM_OK unless outer + * L4 headers were successfully parsed. FW will always report zero UDP + * checksum as CSUM_OK. + */ + if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK || + rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) { + __skb_incr_checksum_unnecessary(skb); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_ok++; + u64_stats_update_end(&r_vec->rx_sync); + } + + if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK || + rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) { + __skb_incr_checksum_unnecessary(skb); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->hw_csum_rx_inner_ok++; + u64_stats_update_end(&r_vec->rx_sync); + } +} + +/** + * nfp_net_set_hash() - Set SKB hash data + * @netdev: adapter's net_device structure + * @skb: SKB to set the hash data on + * @rxd: RX descriptor + * + * The RSS hash and hash-type are pre-pended to the packet data. + * Extract and decode it and set the skb fields. + */ +static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, + struct nfp_net_rx_desc *rxd) +{ + struct nfp_net_rx_hash *rx_hash; + + if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) || + !(netdev->features & NETIF_F_RXHASH)) + return; + + rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash)); + + switch (be32_to_cpu(rx_hash->hash_type)) { + case NFP_NET_RSS_IPV4: + case NFP_NET_RSS_IPV6: + case NFP_NET_RSS_IPV6_EX: + skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3); + break; + default: + skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4); + break; + } +} + +/** + * nfp_net_rx() - receive up to @budget packets on @rx_ring + * @rx_ring: RX ring to receive from + * @budget: NAPI budget + * + * Note, this function is separated out from the napi poll function to + * more cleanly separate packet receive code from other bookkeeping + * functions performed in the napi poll function. + * + * There are differences between the NFP-3200 firmware and the + * NFP-6000 firmware. The NFP-3200 firmware uses a dedicated RX queue + * to indicate that new packets have arrived. 
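The RX checksum error check below compares the group of "checksum was checked" bits against the group of "checksum was OK" bits after aligning them with a shift, so any protocol that was verified but not OK shows up as an inequality. A minimal model with made-up bit positions (the real masks live in the descriptor definitions):

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative flag layout (positions invented for the example):
 * OK bits in the low group, CHECKED bits four positions higher.
 */
#define CSUM_TCP_OK       (1u << 0)
#define CSUM_UDP_OK       (1u << 1)
#define CSUM_TCP_CHECKED  (1u << 4)
#define CSUM_UDP_CHECKED  (1u << 5)
#define CSUM_ALL_OK       (CSUM_TCP_OK | CSUM_UDP_OK)
#define CSUM_ALL_CHECKED  (CSUM_TCP_CHECKED | CSUM_UDP_CHECKED)
#define CSUM_OK_SHIFT     4	/* distance between the two groups */

static int csum_has_errors(uint16_t flags)
{
	uint16_t checked = flags & CSUM_ALL_CHECKED;
	uint16_t ok = flags & CSUM_ALL_OK;

	/* every checked protocol must also have its OK bit set */
	return checked != (ok << CSUM_OK_SHIFT);
}

int main(void)
{
	assert(!csum_has_errors(0));				 /* nothing checked */
	assert(!csum_has_errors(CSUM_TCP_CHECKED | CSUM_TCP_OK));
	assert(csum_has_errors(CSUM_TCP_CHECKED));		 /* checked, not OK */
	assert(csum_has_errors(CSUM_TCP_CHECKED | CSUM_UDP_OK)); /* mismatched pair */
	return 0;
}
```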
The NFP-6000 does not have this queue and uses the DD bit in the
+ * RX descriptor. This method cannot be used on the NFP-3200 as it
+ * causes a race condition: the RX ring write pointer on the NFP-3200
+ * is updated after packets (and descriptors) have been DMAed. If the
+ * DD bit is used and the read pointer is subsequently updated, this
+ * may cause the RX queue to underflow (if the firmware has not yet
+ * updated the write pointer). Therefore we use slightly ugly
+ * conditional code below to handle the differences. We may, in the
+ * future, update the NFP-3200 firmware to behave the same as the
+ * firmware on the NFP-6000.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+	struct nfp_net *nn = r_vec->nfp_net;
+	unsigned int data_len, meta_len;
+	int avail = 0, pkts_polled = 0;
+	struct sk_buff *skb, *new_skb;
+	struct nfp_net_rx_desc *rxd;
+	dma_addr_t new_dma_addr;
+	u32 qcp_wr_p;
+	int idx;
+
+	if (nn->is_nfp3200) {
+		/* Work out how many packets arrived */
+		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
+		idx = rx_ring->rd_p % rx_ring->cnt;
+
+		if (qcp_wr_p == idx)
+			/* No new packets */
+			return 0;
+
+		if (qcp_wr_p > idx)
+			avail = qcp_wr_p - idx;
+		else
+			avail = qcp_wr_p + rx_ring->cnt - idx;
+	} else {
+		avail = budget + 1;
+	}
+
+	while (avail > 0 && pkts_polled < budget) {
+		idx = rx_ring->rd_p % rx_ring->cnt;
+
+		rxd = &rx_ring->rxds[idx];
+		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
+			if (nn->is_nfp3200)
+				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
+				       rx_ring->idx, idx,
+				       rxd->vals[0], rxd->vals[1]);
+			break;
+		}
+		/* Memory barrier to ensure that we won't do other reads
+		 * before the DD bit.
+ */ + dma_rmb(); + + rx_ring->rd_p++; + pkts_polled++; + avail--; + + skb = rx_ring->rxbufs[idx].skb; + + new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr); + if (!new_skb) { + nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb, + rx_ring->rxbufs[idx].dma_addr); + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_drops++; + u64_stats_update_end(&r_vec->rx_sync); + continue; + } + + dma_unmap_single(&nn->pdev->dev, + rx_ring->rxbufs[idx].dma_addr, + nn->fl_bufsz, DMA_FROM_DEVICE); + + nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr); + + meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; + data_len = le16_to_cpu(rxd->rxd.data_len); + + if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) { + dev_kfree_skb_any(skb); + continue; + } + + if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) { + /* The packet data starts after the metadata */ + skb_reserve(skb, meta_len); + } else { + /* The packet data starts at a fixed offset */ + skb_reserve(skb, nn->rx_offset); + } + + /* Adjust the SKB for the dynamic meta data pre-pended */ + skb_put(skb, data_len - meta_len); + + nfp_net_set_hash(nn->netdev, skb, rxd); + + /* Pad small frames to minimum */ + if (skb_put_padto(skb, 60)) + break; + + /* Stats update */ + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_pkts++; + r_vec->rx_bytes += skb->len; + u64_stats_update_end(&r_vec->rx_sync); + + skb_record_rx_queue(skb, rx_ring->idx); + skb->protocol = eth_type_trans(skb, nn->netdev); + + nfp_net_rx_csum(nn, r_vec, rxd, skb); + + if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rxd->rxd.vlan)); + + napi_gro_receive(&rx_ring->r_vec->napi, skb); + } + + if (nn->is_nfp3200) + nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled); + + return pkts_polled; +} + +/** + * nfp_net_poll() - napi poll function + * @napi: NAPI structure + * @budget: NAPI budget + * + * Return: number of packets polled. 
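The receive path above either skips the prepended metadata (dynamic RX offset mode) or starts at a fixed offset, and then trims the metadata bytes from the reported length. A small sketch of that pointer arithmetic with invented offsets and lengths:

```c
#include <assert.h>
#include <stdint.h>

/* Standalone model of locating packet data in an RX buffer that may
 * carry prepended metadata. data_len covers metadata plus frame;
 * meta_len covers just the metadata. Offsets below are made up.
 */
static const uint8_t *packet_start(const uint8_t *buf, uint32_t meta_len,
				   int dynamic_offset, uint32_t fixed_offset)
{
	/* dynamic mode: frame begins right after the metadata;
	 * fixed mode: frame always begins at the configured offset.
	 */
	return buf + (dynamic_offset ? meta_len : fixed_offset);
}

static uint32_t packet_len(uint32_t data_len, uint32_t meta_len)
{
	return data_len - meta_len;	/* strip the metadata bytes */
}

int main(void)
{
	uint8_t buf[2048];

	/* 8 bytes of metadata followed by a 60-byte frame, dynamic mode */
	assert(packet_start(buf, 8, 1, 32) == buf + 8);
	assert(packet_len(68, 8) == 60);

	/* fixed 32-byte offset, no metadata */
	assert(packet_start(buf, 0, 0, 32) == buf + 32);
	assert(packet_len(60, 0) == 60);
	return 0;
}
```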
+ */ +static int nfp_net_poll(struct napi_struct *napi, int budget) +{ + struct nfp_net_r_vector *r_vec = + container_of(napi, struct nfp_net_r_vector, napi); + struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; + struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring; + struct nfp_net *nn = r_vec->nfp_net; + struct netdev_queue *txq; + unsigned int pkts_polled; + + tx_ring = &nn->tx_rings[rx_ring->idx]; + txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nfp_net_tx_complete(tx_ring); + + pkts_polled = nfp_net_rx(rx_ring, budget); + + if (pkts_polled < budget) { + napi_complete_done(napi, pkts_polled); + nfp_net_irq_unmask(nn, r_vec->irq_idx); + } + + return pkts_polled; +} + +/* Setup and Configuration + */ + +/** + * nfp_net_tx_ring_free() - Free resources allocated to a TX ring + * @tx_ring: TX ring to free + */ +static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0); + + kfree(tx_ring->txbufs); + + if (tx_ring->txds) + dma_free_coherent(&pdev->dev, tx_ring->size, + tx_ring->txds, tx_ring->dma); + + tx_ring->cnt = 0; + tx_ring->wr_p = 0; + tx_ring->rd_p = 0; + tx_ring->qcp_rd_p = 0; + tx_ring->wr_ptr_add = 0; + + tx_ring->txbufs = NULL; + tx_ring->txds = NULL; + tx_ring->dma = 0; + tx_ring->size = 0; +} + +/** + * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring + * @tx_ring: TX Ring structure to allocate + * + * Return: 0 on success, negative errno otherwise. + */ +static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int sz; + + tx_ring->cnt = nn->txd_cnt; + + tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; + tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->txds) + goto err_alloc; + + sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt; + tx_ring->txbufs = kzalloc(sz, GFP_KERNEL); + if (!tx_ring->txbufs) + goto err_alloc; + + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx); + + netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); + + nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", + tx_ring->idx, tx_ring->qcidx, + tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds); + + return 0; + +err_alloc: + nfp_net_tx_ring_free(tx_ring); + return -ENOMEM; +} + +/** + * nfp_net_rx_ring_free() - Free resources allocated to a RX ring + * @rx_ring: RX ring to free + */ +static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0); + + kfree(rx_ring->rxbufs); + + if (rx_ring->rxds) + dma_free_coherent(&pdev->dev, rx_ring->size, + rx_ring->rxds, rx_ring->dma); + + rx_ring->cnt = 0; + rx_ring->wr_p = 0; + rx_ring->rd_p = 0; + rx_ring->wr_ptr_add = 
0; + + rx_ring->rxbufs = NULL; + rx_ring->rxds = NULL; + rx_ring->dma = 0; + rx_ring->size = 0; +} + +/** + * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring + * @rx_ring: RX ring to allocate + * + * Return: 0 on success, negative errno otherwise. + */ +static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net *nn = r_vec->nfp_net; + struct pci_dev *pdev = nn->pdev; + int sz; + + rx_ring->cnt = nn->rxd_cnt; + + rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; + rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->rxds) + goto err_alloc; + + sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt; + rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL); + if (!rx_ring->rxbufs) + goto err_alloc; + + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx); + + nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", + rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, + rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); + + return 0; + +err_alloc: + nfp_net_rx_ring_free(rx_ring); + return -ENOMEM; +} + +static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free) +{ + struct nfp_net_r_vector *r_vec; + struct msix_entry *entry; + + while (n_free--) { + r_vec = &nn->r_vecs[n_free]; + entry = &nn->irq_entries[r_vec->irq_idx]; + + nfp_net_rx_ring_free(r_vec->rx_ring); + nfp_net_tx_ring_free(r_vec->tx_ring); + + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, r_vec); + + netif_napi_del(&r_vec->napi); + } +} + +/** + * nfp_net_free_rings() - Free all ring resources + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_free_rings(struct nfp_net *nn) +{ + __nfp_net_free_rings(nn, nn->num_r_vecs); +} + +/** + * nfp_net_alloc_rings() - Allocate resources for RX and TX rings + * @nn: NFP Net device to reconfigure + * + * Return: 0 on success or negative errno on error. 
+ */ +static int nfp_net_alloc_rings(struct nfp_net *nn) +{ + struct nfp_net_r_vector *r_vec; + struct msix_entry *entry; + int err; + int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + r_vec = &nn->r_vecs[r]; + entry = &nn->irq_entries[r_vec->irq_idx]; + + /* Setup NAPI */ + netif_napi_add(nn->netdev, &r_vec->napi, + nfp_net_poll, NAPI_POLL_WEIGHT); + + snprintf(r_vec->name, sizeof(r_vec->name), + "%s-rxtx-%d", nn->netdev->name, r); + err = request_irq(entry->vector, r_vec->handler, 0, + r_vec->name, r_vec); + if (err) { + nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector); + goto err_napi_del; + } + + irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", + r, entry->vector, entry->entry); + + /* Allocate TX ring resources */ + err = nfp_net_tx_ring_alloc(r_vec->tx_ring); + if (err) + goto err_free_irq; + + /* Allocate RX ring resources */ + err = nfp_net_rx_ring_alloc(r_vec->rx_ring); + if (err) + goto err_free_tx; + } + + return 0; + +err_free_tx: + nfp_net_tx_ring_free(r_vec->tx_ring); +err_free_irq: + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, r_vec); +err_napi_del: + netif_napi_del(&r_vec->napi); + __nfp_net_free_rings(nn, r); + return err; +} + +/** + * nfp_net_rss_write_itbl() - Write RSS indirection table to device + * @nn: NFP Net device to reconfigure + */ +void nfp_net_rss_write_itbl(struct nfp_net *nn) +{ + int i; + + for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4) + nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i, + get_unaligned_le32(nn->rss_itbl + i)); +} + +/** + * nfp_net_rss_write_key() - Write RSS hash key to device + * @nn: NFP Net device to reconfigure + */ +void nfp_net_rss_write_key(struct nfp_net *nn) +{ + int i; + + for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4) + nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, + get_unaligned_le32(nn->rss_key + i)); +} + +/** + * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW + * @nn: NFP Net device to reconfigure + */ +void nfp_net_coalesce_write_cfg(struct nfp_net *nn) +{ + u8 i; + u32 factor; + u32 value; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->me_freq_mhz / 16; + + /* copy RX interrupt coalesce parameters */ + value = (nn->rx_coalesce_max_frames << 16) | + (factor * nn->rx_coalesce_usecs); + for (i = 0; i < nn->num_r_vecs; i++) + nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); + + /* copy TX interrupt coalesce parameters */ + value = (nn->tx_coalesce_max_frames << 16) | + (factor * nn->tx_coalesce_usecs); + for (i = 0; i < nn->num_r_vecs; i++) + nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); +} + +/** + * nfp_net_write_mac_addr() - Write mac address to device registers + * @nn: NFP Net device to reconfigure + * @mac: Six-byte MAC address to be written + * + * We do a bit of byte swapping dance because firmware is LE. 
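The "byte swapping dance" mentioned above amounts to writing the six MAC bytes as one big-endian 32-bit word plus a 16-bit remainder shifted into the upper half of a second word (32-bit writes only, for NFP-3200 compatibility). A standalone model of the packing:

```c
#include <assert.h>
#include <stdint.h>

/* Standalone model of packing a 6-byte MAC address into two 32-bit
 * config words, mirroring the get_unaligned_be32/be16 usage above.
 */
static uint32_t be32_from(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

static uint16_t be16_from(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x15, 0x4d, 0x12, 0x34, 0x56 };
	uint32_t word0 = be32_from(mac);			/* first four bytes */
	uint32_t word1 = (uint32_t)be16_from(mac + 4) << 16;	/* last two, high half */

	assert(word0 == 0x00154d12);
	assert(word1 == 0x34560000);
	return 0;
}
```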
+ */ +static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) +{ + nn_writel(nn, NFP_NET_CFG_MACADDR + 0, + get_unaligned_be32(nn->netdev->dev_addr)); + /* We can't do writew for NFP-3200 compatibility */ + nn_writel(nn, NFP_NET_CFG_MACADDR + 4, + get_unaligned_be16(nn->netdev->dev_addr + 4) << 16); +} + +/** + * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_clear_config_and_disable(struct nfp_net *nn) +{ + u32 new_ctrl, update; + int err; + + new_ctrl = nn->ctrl; + new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; + update = NFP_NET_CFG_UPDATE_GEN; + update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_RING; + + if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; + + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, update); + if (err) { + nn_err(nn, "Could not disable device: %d\n", err); + return; + } + + nn->ctrl = new_ctrl; +} + +/** + * nfp_net_start_vec() - Start ring vector + * @nn: NFP Net device structure + * @r_vec: Ring vector to be started + */ +static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +{ + unsigned int irq_vec; + int err = 0; + + irq_vec = nn->irq_entries[r_vec->irq_idx].vector; + + disable_irq(irq_vec); + + err = nfp_net_rx_fill_freelist(r_vec->rx_ring); + if (err) { + nn_err(nn, "RV%02d: couldn't allocate enough buffers\n", + r_vec->irq_idx); + goto out; + } + + napi_enable(&r_vec->napi); +out: + enable_irq(irq_vec); + + return err; +} + +static int nfp_net_netdev_open(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err, r; + u32 update = 0; + u32 new_ctrl; + + if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { + nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); + return -EBUSY; + } + + new_ctrl = nn->ctrl; + + /* Step 1: Allocate resources for rings and the like + * - Request interrupts + * - Allocate RX and TX ring resources + * - Setup initial RSS table + */ + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", + nn->exn_name, sizeof(nn->exn_name), + NFP_NET_IRQ_EXN_IDX, nn->exn_handler); + if (err) + return err; + + err = nfp_net_alloc_rings(nn); + if (err) + goto err_free_exn; + + err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); + if (err) + goto err_free_rings; + + err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + if (err) + goto err_free_rings; + + if (nn->cap & NFP_NET_CFG_CTRL_RSS) { + nfp_net_rss_write_key(nn); + nfp_net_rss_write_itbl(nn); + nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); + update |= NFP_NET_CFG_UPDATE_RSS; + } + + if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { + nfp_net_coalesce_write_cfg(nn); + + new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + update |= NFP_NET_CFG_UPDATE_IRQMOD; + } + + /* Step 2: Configure the NFP + * - Enable rings from 0 to tx_rings/rx_rings - 1. + * - Write MAC address (in case it changed) + * - Set the MTU + * - Set the Freelist buffer size + * - Enable the FW + */ + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 
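The ring enable writes just above build a one-bit-per-ring mask and special-case 64 rings because shifting a 64-bit value by 64 is undefined behaviour in C. A tiny standalone version of that helper:

```c
#include <assert.h>
#include <stdint.h>

/* Standalone model of the ring-enable bitmask: one bit per ring, with an
 * explicit special case to avoid the undefined 64-bit shift by 64.
 */
static uint64_t ring_enable_mask(unsigned int num_rings)
{
	if (num_rings == 64)
		return ~0ULL;
	return ((uint64_t)1 << num_rings) - 1;
}

int main(void)
{
	assert(ring_enable_mask(1) == 0x1);
	assert(ring_enable_mask(8) == 0xff);
	assert(ring_enable_mask(64) == 0xffffffffffffffffULL);
	return 0;
}
```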
+ 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + + nfp_net_write_mac_addr(nn, netdev->dev_addr); + + nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + + /* Enable device */ + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + update |= NFP_NET_CFG_UPDATE_GEN; + update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_RING; + if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, update); + if (err) + goto err_clear_config; + + nn->ctrl = new_ctrl; + + /* Since reconfiguration requests while NFP is down are ignored we + * have to wipe the entire VXLAN configuration and reinitialize it. + */ + if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); + memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); + vxlan_get_rx_port(netdev); + } + + /* Step 3: Enable for kernel + * - put some freelist descriptors on each RX ring + * - enable NAPI on each ring + * - enable all TX queues + * - set link state + */ + for (r = 0; r < nn->num_r_vecs; r++) { + err = nfp_net_start_vec(nn, &nn->r_vecs[r]); + if (err) + goto err_disable_napi; + } + + netif_tx_wake_all_queues(netdev); + + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", + nn->lsc_name, sizeof(nn->lsc_name), + NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); + if (err) + goto err_stop_tx; + nfp_net_read_link_status(nn); + + return 0; + +err_stop_tx: + netif_tx_disable(netdev); + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_tx_flush(nn->r_vecs[r].tx_ring); +err_disable_napi: + while (r--) { + napi_disable(&nn->r_vecs[r].napi); + nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + } +err_clear_config: + nfp_net_clear_config_and_disable(nn); +err_free_rings: + nfp_net_free_rings(nn); +err_free_exn: + nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); + return err; +} + +/** + * nfp_net_netdev_close() - Called when the device is downed + * @netdev: netdev structure + */ +static int nfp_net_netdev_close(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int r; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { + nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); + return 0; + } + + /* Step 1: Disable RX and TX rings from the Linux kernel perspective + */ + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + netif_carrier_off(netdev); + nn->link_up = false; + + for (r = 0; r < nn->num_r_vecs; r++) + napi_disable(&nn->r_vecs[r].napi); + + netif_tx_disable(netdev); + + /* Step 2: Tell NFP + */ + nfp_net_clear_config_and_disable(nn); + + /* Step 3: Free resources + */ + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + nfp_net_tx_flush(nn->r_vecs[r].tx_ring); + } + + nfp_net_free_rings(nn); + nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); + + nn_dbg(nn, "%s down", netdev->name); + return 0; +} + +static void nfp_net_set_rx_mode(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 new_ctrl; + + new_ctrl = nn->ctrl; + + if (netdev->flags & IFF_PROMISC) { + if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) + new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; + else + nn_warn(nn, "FW does not support promiscuous mode\n"); + } else { + new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; + } + + if (new_ctrl == nn->ctrl) + return; + + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN)) + return; + + nn->ctrl = new_ctrl; +} + +static 
int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 tmp; + + nn_dbg(nn, "New MTU = %d\n", new_mtu); + + if (new_mtu < 68 || new_mtu > nn->max_mtu) { + nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + /* Freelist buffer size rounded up to the nearest 1K */ + tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND; + nn->fl_bufsz = roundup(tmp, 1024); + + /* restart if running */ + if (netif_running(netdev)) { + nfp_net_netdev_close(netdev); + nfp_net_netdev_open(netdev); + } + + return 0; +} + +static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nfp_net *nn = netdev_priv(netdev); + int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; + u64 data[3]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&r_vec->rx_sync); + data[0] = r_vec->rx_pkts; + data[1] = r_vec->rx_bytes; + data[2] = r_vec->rx_drops; + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); + stats->rx_packets += data[0]; + stats->rx_bytes += data[1]; + stats->rx_dropped += data[2]; + + do { + start = u64_stats_fetch_begin(&r_vec->tx_sync); + data[0] = r_vec->tx_pkts; + data[1] = r_vec->tx_bytes; + data[2] = r_vec->tx_errors; + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); + stats->tx_packets += data[0]; + stats->tx_bytes += data[1]; + stats->tx_errors += data[2]; + } + + return stats; +} + +static int nfp_net_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct nfp_net *nn = netdev_priv(netdev); + u32 new_ctrl; + int err; + + /* Assume this is not called with features we have not advertised */ + + new_ctrl = nn->ctrl; + + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM; + } + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM; + } + + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + new_ctrl |= NFP_NET_CFG_CTRL_LSO; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_LSO; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) { + if (features & NETIF_F_HW_VLAN_CTAG_TX) + new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN; + } + + if (changed & NETIF_F_SG) { + if (features & NETIF_F_SG) + new_ctrl |= NFP_NET_CFG_CTRL_GATHER; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; + } + + nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", + netdev->features, features, changed); + + if (new_ctrl == nn->ctrl) + return 0; + + nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl); + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); + if (err) + return err; + + nn->ctrl = new_ctrl; + + return 0; +} + +static netdev_features_t +nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u8 l4_hdr; + + /* We can't do TSO over double tagged packets (802.1AD) */ + features &= vlan_features_check(skb, features); + + 
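The MTU change handler above sizes the freelist buffer as the MTU plus L2 overhead plus the metadata prepend, rounded up to a 1KB multiple. A sketch of that sizing; ETH_HLEN and VLAN_HLEN carry their usual values, while MAX_PREPEND below is an invented stand-in for the driver's NFP_NET_MAX_PREPEND:

```c
#include <assert.h>
#include <stdint.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define MAX_PREPEND	64	/* illustrative; the driver defines its own value */

/* Round v up to the next multiple of `to` (to must be a power of two). */
static uint32_t roundup_pow2(uint32_t v, uint32_t to)
{
	return (v + to - 1) & ~(to - 1);
}

/* Standalone model of sizing the freelist buffer for a given MTU: the
 * buffer must hold the payload, L2 headers, a VLAN tag and any
 * prepended metadata, rounded up to 1KB.
 */
static uint32_t fl_bufsz_for_mtu(uint32_t mtu)
{
	return roundup_pow2(mtu + ETH_HLEN + VLAN_HLEN + MAX_PREPEND, 1024);
}

int main(void)
{
	assert(fl_bufsz_for_mtu(1500) == 2048);	/* 1582 -> 2KB */
	assert(fl_bufsz_for_mtu(9000) == 9216);	/* 9082 -> 9KB */
	return 0;
}
```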
if (!skb->encapsulation) + return features; + + /* Ensure that inner L4 header offset fits into TX descriptor field */ + if (skb_is_gso(skb)) { + u32 hdrlen; + + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + + if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ)) + features &= ~NETIF_F_GSO_MASK; + } + + /* VXLAN/GRE check */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) || + (l4_hdr == IPPROTO_UDP && + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} + +/** + * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW + * @nn: NFP Net device to reconfigure + * @idx: Index into the port table where new port should be written + * @port: UDP port to configure (pass zero to remove VXLAN port) + */ +static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) +{ + int i; + + nn->vxlan_ports[idx] = port; + + if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN)) + return; + + BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); + for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) + nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), + be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | + be16_to_cpu(nn->vxlan_ports[i])); + + nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN); +} + +/** + * nfp_net_find_vxlan_idx() - find table entry of the port or a free one + * @nn: NFP Network structure + * @port: UDP port to look for + * + * Return: if the port is already in the table -- it's position; + * if the port is not in the table -- free position to use; + * if the table is full -- -ENOSPC. 
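The VXLAN port programming above stores two UDP ports per 32-bit configuration word (the odd-indexed port in the high half), which is why the table length must be even. A userspace model of that packing with example port numbers:

```c
#include <assert.h>
#include <stdint.h>

#define N_PORTS 8	/* illustrative even-sized table */

/* Standalone model of packing the tunnel UDP port table: two ports per
 * 32-bit word, the odd-indexed port in the upper half.
 */
static void pack_ports(const uint16_t *ports, uint32_t *words)
{
	for (int i = 0; i < N_PORTS; i += 2)
		words[i / 2] = (uint32_t)ports[i + 1] << 16 | ports[i];
}

int main(void)
{
	uint16_t ports[N_PORTS] = { 4789, 8472, 0, 0, 0, 0, 0, 0 };
	uint32_t words[N_PORTS / 2];

	pack_ports(ports, words);
	assert(words[0] == ((uint32_t)8472 << 16 | 4789));
	assert(words[1] == 0);
	return 0;
}
```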
+ */ +static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) +{ + int i, free_idx = -ENOSPC; + + for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { + if (nn->vxlan_ports[i] == port) + return i; + if (!nn->vxlan_usecnt[i]) + free_idx = i; + } + + return free_idx; +} + +static void nfp_net_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct nfp_net *nn = netdev_priv(netdev); + int idx; + + idx = nfp_net_find_vxlan_idx(nn, port); + if (idx == -ENOSPC) + return; + + if (!nn->vxlan_usecnt[idx]++) + nfp_net_set_vxlan_port(nn, idx, port); +} + +static void nfp_net_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct nfp_net *nn = netdev_priv(netdev); + int idx; + + idx = nfp_net_find_vxlan_idx(nn, port); + if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) + return; + + if (!--nn->vxlan_usecnt[idx]) + nfp_net_set_vxlan_port(nn, idx, 0); +} + +static const struct net_device_ops nfp_net_netdev_ops = { + .ndo_open = nfp_net_netdev_open, + .ndo_stop = nfp_net_netdev_close, + .ndo_start_xmit = nfp_net_tx, + .ndo_get_stats64 = nfp_net_stat64, + .ndo_tx_timeout = nfp_net_tx_timeout, + .ndo_set_rx_mode = nfp_net_set_rx_mode, + .ndo_change_mtu = nfp_net_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = nfp_net_set_features, + .ndo_features_check = nfp_net_features_check, + .ndo_add_vxlan_port = nfp_net_add_vxlan_port, + .ndo_del_vxlan_port = nfp_net_del_vxlan_port, +}; + +/** + * nfp_net_info() - Print general info about the NIC + * @nn: NFP Net device to reconfigure + */ +void nfp_net_info(struct nfp_net *nn) +{ + nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", + nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx", + nn->is_vf ? "VF " : "", + nn->num_tx_rings, nn->max_tx_rings, + nn->num_rx_rings, nn->max_rx_rings); + nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", + nn->fw_ver.resv, nn->fw_ver.class, + nn->fw_ver.major, nn->fw_ver.minor, + nn->max_mtu); + nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + nn->cap, + nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", + nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", + nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", + nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", + nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", + nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", + nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", + nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "", + nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "", + nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "", + nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "", + nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : ""); +} + +/** + * nfp_net_netdev_alloc() - Allocate netdev and related structure + * @pdev: PCI device + * @max_tx_rings: Maximum number of TX rings supported by device + * @max_rx_rings: Maximum number of RX rings supported by device + * + * This function allocates a netdev device and fills in the initial + * part of the @struct nfp_net structure. + * + * Return: NFP Net device structure, or ERR_PTR on error. 
+ */ +struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, + int max_tx_rings, int max_rx_rings) +{ + struct net_device *netdev; + struct nfp_net *nn; + int nqs; + + netdev = alloc_etherdev_mqs(sizeof(struct nfp_net), + max_tx_rings, max_rx_rings); + if (!netdev) + return ERR_PTR(-ENOMEM); + + SET_NETDEV_DEV(netdev, &pdev->dev); + nn = netdev_priv(netdev); + + nn->netdev = netdev; + nn->pdev = pdev; + + nn->max_tx_rings = max_tx_rings; + nn->max_rx_rings = max_rx_rings; + + nqs = netif_get_num_default_rss_queues(); + nn->num_tx_rings = min_t(int, nqs, max_tx_rings); + nn->num_rx_rings = min_t(int, nqs, max_rx_rings); + + nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; + nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + + spin_lock_init(&nn->reconfig_lock); + spin_lock_init(&nn->link_status_lock); + + return nn; +} + +/** + * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did + * @nn: NFP Net device to reconfigure + */ +void nfp_net_netdev_free(struct nfp_net *nn) +{ + free_netdev(nn->netdev); +} + +/** + * nfp_net_rss_init() - Set the initial RSS parameters + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_rss_init(struct nfp_net *nn) +{ + int i; + + netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + + for (i = 0; i < sizeof(nn->rss_itbl); i++) + nn->rss_itbl[i] = + ethtool_rxfh_indir_default(i, nn->num_rx_rings); + + /* Enable IPv4/IPv6 TCP by default */ + nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | + NFP_NET_CFG_RSS_IPV6_TCP | + NFP_NET_CFG_RSS_TOEPLITZ | + NFP_NET_CFG_RSS_MASK; +} + +/** + * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_irqmod_init(struct nfp_net *nn) +{ + nn->rx_coalesce_usecs = 50; + nn->rx_coalesce_max_frames = 64; + nn->tx_coalesce_usecs = 50; + nn->tx_coalesce_max_frames = 64; +} + +/** + * nfp_net_netdev_init() - Initialise/finalise the netdev structure + * @netdev: netdev structure + * + * Return: 0 on success or negative errno on error. + */ +int nfp_net_netdev_init(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err; + + /* Get some of the read-only fields from the BAR */ + nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); + nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); + + nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + + /* Set default MTU and Freelist buffer size */ + if (nn->max_mtu < NFP_NET_DEFAULT_MTU) + netdev->mtu = nn->max_mtu; + else + netdev->mtu = NFP_NET_DEFAULT_MTU; + nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ; + + /* Advertise/enable offloads based on capabilities + * + * Note: netdev->features show the currently enabled features + * and netdev->hw_features advertises which features are + * supported. By default we enable most features. 
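The RSS initialisation below fills the indirection table so that hash buckets spread round-robin across the enabled RX rings, which is the pattern ethtool_rxfh_indir_default() produces. A standalone model with an invented table size:

```c
#include <assert.h>
#include <stdint.h>

#define ITBL_SZ 128	/* illustrative indirection table size */

/* Standalone model of the default RSS indirection table fill: entry i
 * points at RX ring i modulo the number of rings.
 */
static void rss_itbl_init(uint8_t *itbl, unsigned int num_rx_rings)
{
	for (unsigned int i = 0; i < ITBL_SZ; i++)
		itbl[i] = i % num_rx_rings;
}

int main(void)
{
	uint8_t itbl[ITBL_SZ];

	rss_itbl_init(itbl, 6);
	assert(itbl[0] == 0);
	assert(itbl[5] == 5);
	assert(itbl[6] == 0);	/* wraps back to the first ring */
	assert(itbl[127] == 1);	/* 127 % 6 */
	return 0;
}
```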
+ */ + netdev->hw_features = NETIF_F_HIGHDMA; + if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) { + netdev->hw_features |= NETIF_F_RXCSUM; + nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } + if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + } + if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { + netdev->hw_features |= NETIF_F_SG; + nn->ctrl |= NFP_NET_CFG_CTRL_GATHER; + } + if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) { + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + nn->ctrl |= NFP_NET_CFG_CTRL_LSO; + } + if (nn->cap & NFP_NET_CFG_CTRL_RSS) { + netdev->hw_features |= NETIF_F_RXHASH; + nfp_net_rss_init(nn); + nn->ctrl |= NFP_NET_CFG_CTRL_RSS; + } + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && + nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL; + nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; + + netdev->hw_enc_features = netdev->hw_features; + } + + netdev->vlan_features = netdev->hw_features; + + if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } + if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + } + + netdev->features = netdev->hw_features; + + /* Advertise but disable TSO by default. */ + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + /* Allow L2 Broadcast and Multicast through by default, if supported */ + if (nn->cap & NFP_NET_CFG_CTRL_L2BC) + nn->ctrl |= NFP_NET_CFG_CTRL_L2BC; + if (nn->cap & NFP_NET_CFG_CTRL_L2MC) + nn->ctrl |= NFP_NET_CFG_CTRL_L2MC; + + /* Allow IRQ moderation, if supported */ + if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { + nfp_net_irqmod_init(nn); + nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + } + + /* On NFP-3200 enable MSI-X auto-masking, if supported and the + * interrupts are not shared. + */ + if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO) + nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO; + + /* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */ + if (nn->fw_ver.major >= 2) + nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); + else + nn->rx_offset = NFP_NET_RX_OFFSET; + + /* Stash the re-configuration queue away. First odd queue in TX Bar */ + nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; + + /* Make sure the FW knows the netdev is supposed to be disabled here */ + nn_writel(nn, NFP_NET_CFG_CTRL, 0); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING | + NFP_NET_CFG_UPDATE_GEN); + if (err) + return err; + + /* Finalise the netdev setup */ + ether_setup(netdev); + netdev->netdev_ops = &nfp_net_netdev_ops; + netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); + netif_carrier_off(netdev); + + nfp_net_set_ethtool_ops(netdev); + nfp_net_irqs_assign(netdev); + + return register_netdev(netdev); +} + +/** + * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did. + * @netdev: netdev structure + */ +void nfp_net_netdev_clean(struct net_device *netdev) +{ + unregister_netdev(netdev); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h new file mode 100644 index 000000000000..8692003aeed8 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net_ctrl.h + * Netronome network device driver: Control BAR layout + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + */ + +#ifndef _NFP_NET_CTRL_H_ +#define _NFP_NET_CTRL_H_ + +/* IMPORTANT: This header file is shared with the FW, + * no OS specific constructs, please! + */ + +/** + * Configuration BAR size. + * + * The configuration BAR is 8K in size, but on the NFP6000, due to + * THB-350, 32k needs to be reserved. 
+ */ +#define NFP_NET_CFG_BAR_SZ (32 * 1024) + +/** + * Offset in Freelist buffer where packet starts on RX + */ +#define NFP_NET_RX_OFFSET 32 + +/** + * Maximum header size supported for LSO frames + */ +#define NFP_NET_LSO_MAX_HDR_SZ 255 + +/** + * Hash type pre-pended when a RSS hash was computed + */ +#define NFP_NET_RSS_NONE 0 +#define NFP_NET_RSS_IPV4 1 +#define NFP_NET_RSS_IPV6 2 +#define NFP_NET_RSS_IPV6_EX 3 +#define NFP_NET_RSS_IPV4_TCP 4 +#define NFP_NET_RSS_IPV6_TCP 5 +#define NFP_NET_RSS_IPV6_EX_TCP 6 +#define NFP_NET_RSS_IPV4_UDP 7 +#define NFP_NET_RSS_IPV6_UDP 8 +#define NFP_NET_RSS_IPV6_EX_UDP 9 + +/** + * @NFP_NET_TXR_MAX: Maximum number of TX rings + * @NFP_NET_TXR_MASK: Mask for TX rings + * @NFP_NET_RXR_MAX: Maximum number of RX rings + * @NFP_NET_RXR_MASK: Mask for RX rings + */ +#define NFP_NET_TXR_MAX 64 +#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) +#define NFP_NET_RXR_MAX 64 +#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) + +/** + * Read/Write config words (0x0000 - 0x002c) + * @NFP_NET_CFG_CTRL: Global control + * @NFP_NET_CFG_UPDATE: Indicate which fields are updated + * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings + * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings + * @NFP_NET_CFG_MTU: Set MTU size + * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) + * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * @NFP_NET_CFG_MACADDR: MAC address + * + * TODO: + * - define Error details in UPDATE + */ +#define NFP_NET_CFG_CTRL 0x0000 +#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ +#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ +#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ +#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ +#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ +#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ +#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ +#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ +#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ +#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ +#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */ +#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ +#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */ +#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ +#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ +#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ +#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ +#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */ +#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */ +#define NFP_NET_CFG_UPDATE 0x0004 +#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ +#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ +#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ +#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ +#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ +#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ +#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ +#define NFP_NET_CFG_UPDATE_RESET (0x1 << 
7) /* Update due to FLR */ +#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ +#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ +#define NFP_NET_CFG_TXRS_ENABLE 0x0008 +#define NFP_NET_CFG_RXRS_ENABLE 0x0010 +#define NFP_NET_CFG_MTU 0x0018 +#define NFP_NET_CFG_FLBUFSZ 0x001c +#define NFP_NET_CFG_EXN 0x001f +#define NFP_NET_CFG_LSC 0x0020 +#define NFP_NET_CFG_MACADDR 0x0024 + +/** + * Read-only words (0x0030 - 0x0050): + * @NFP_NET_CFG_VERSION: Firmware version number + * @NFP_NET_CFG_STS: Status + * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) + * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_MAX_MTU: Maximum support MTU + * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) + * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) + * + * TODO: + * - define more STS bits + */ +#define NFP_NET_CFG_VERSION 0x0030 +#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24) +#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16) +#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) +#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0 +#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8) +#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) +#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0) +#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) +#define NFP_NET_CFG_STS 0x0034 +#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +#define NFP_NET_CFG_CAP 0x0038 +#define NFP_NET_CFG_MAX_TXRINGS 0x003c +#define NFP_NET_CFG_MAX_RXRINGS 0x0040 +#define NFP_NET_CFG_MAX_MTU 0x0044 +/* Next two words are being used by VFs for solving THB350 issue */ +#define NFP_NET_CFG_START_TXQ 0x0048 +#define NFP_NET_CFG_START_RXQ 0x004c + +/** + * NFP-3200 workaround (0x0050 - 0x0058) + * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. 
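The version word defined above packs reserved, class, major and minor fields one byte each, from the most significant byte down. A small decoder illustrating that layout (the struct and function names here are invented for the example):

```c
#include <assert.h>
#include <stdint.h>

/* Standalone decoder for the 32-bit version word: four byte-wide fields,
 * reserved:class:major:minor from the most significant byte down,
 * matching the *_MASK definitions above.
 */
struct fw_ver {
	uint8_t resv, class, major, minor;
};

static struct fw_ver decode_version(uint32_t word)
{
	struct fw_ver v = {
		.resv  = (word >> 24) & 0xff,
		.class = (word >> 16) & 0xff,
		.major = (word >> 8) & 0xff,
		.minor = word & 0xff,
	};
	return v;
}

int main(void)
{
	struct fw_ver v = decode_version(0x00000203);	/* class 0, version 2.3 */

	assert(v.resv == 0 && v.class == 0);
	assert(v.major == 2 && v.minor == 3);
	return 0;
}
```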
YDS-155 fix) + */ +#define NFP_NET_CFG_SPARE_ADDR 0x0050 +/** + * NFP6000/NFP4000 - Prepend configuration + */ +#define NFP_NET_CFG_RX_OFFSET 0x0050 +#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ + +/** + * NFP6000/NFP4000 - VXLAN/UDP encap configuration + * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports + * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes + */ +#define NFP_NET_CFG_VXLAN_PORT 0x0060 +#define NFP_NET_CFG_VXLAN_SZ 0x0008 + +/** + * 64B reserved for future use (0x0080 - 0x00c0) + */ +#define NFP_NET_CFG_RESERVED 0x0080 +#define NFP_NET_CFG_RESERVED_SZ 0x0040 + +/** + * RSS configuration (0x0100 - 0x01ac): + * Used only when NFP_NET_CFG_CTRL_RSS is enabled + * @NFP_NET_CFG_RSS_CFG: RSS configuration word + * @NFP_NET_CFG_RSS_KEY: RSS "secret" key + * @NFP_NET_CFG_RSS_ITBL: RSS indirection table + */ +#define NFP_NET_CFG_RSS_BASE 0x0100 +#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE +#define NFP_NET_CFG_RSS_MASK (0x7f) +#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) +#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ +#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ +#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ +#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ +#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ +#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) +#define NFP_NET_CFG_RSS_KEY_SZ 0x28 +#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ + NFP_NET_CFG_RSS_KEY_SZ) +#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 + +/** + * TX ring configuration (0x200 - 0x800) + * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration + * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) + * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) + * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet + */ +#define NFP_NET_CFG_TXR_BASE 0x0200 +#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) +#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) +#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) +#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \ + ((_x) * 0x4)) + +/** + * RX ring configuration (0x0800 - 0x0c00) + * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration + * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) + * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) + * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) + * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_RXR_BASE 0x0800 +#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) +#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) +#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x)) +#define NFP_NET_CFG_RXR_IRQ_MOD(_x) 
(NFP_NET_CFG_RXR_BASE + 0x300 + \ + ((_x) * 0x4)) + +/** + * Interrupt Control/Cause registers (0x0c00 - 0x0d00) + * These registers are only used when MSI-X auto-masking is not + * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index + * by MSI-X entry and are 1B in size. If an entry is zero, the + * corresponding entry is enabled. If the FW generates an interrupt, + * it writes a cause into the corresponding field. This also masks + * the MSI-X entry and the host driver must clear the register to + * re-enable the interrupt. + */ +#define NFP_NET_CFG_ICR_BASE 0x0c00 +#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) +#define NFP_NET_CFG_ICR_UNMASKED 0x0 +#define NFP_NET_CFG_ICR_RXTX 0x1 +#define NFP_NET_CFG_ICR_LSC 0x2 + +/** + * General device stats (0x0d00 - 0x0d90) + * all counters are 64bit. + */ +#define NFP_NET_CFG_STATS_BASE 0x0d00 +#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) +#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) +#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) +#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) +#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) +#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) +#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) +#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) +#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) + +#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) +#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) +#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) +#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) +#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) +#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) +#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) +#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80) +#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88) + +/** + * Per ring stats (0x1000 - 0x1800) + * options, 64bit per entry + * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) + * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) + */ +#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 +#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ + ((_x) * 0x10)) +#define NFP_NET_CFG_RXR_STATS_BASE 0x1400 +#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \ + ((_x) * 0x10)) + +#endif /* _NFP_NET_CTRL_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c new file mode 100644 index 000000000000..4c97c713121c --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. 
Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> + +#include "nfp_net.h" + +static struct dentry *nfp_dir; + +static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) +{ + struct nfp_net_rx_ring *rx_ring = file->private; + int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; + struct nfp_net_rx_desc *rxd; + struct sk_buff *skb; + struct nfp_net *nn; + int i; + + rtnl_lock(); + + if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net) + goto out; + nn = rx_ring->r_vec->nfp_net; + if (!netif_running(nn->netdev)) + goto out; + + rxd_cnt = rx_ring->cnt; + + fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); + fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); + rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx); + rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); + + seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n", + rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p, + fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p); + + for (i = 0; i < rxd_cnt; i++) { + rxd = &rx_ring->rxds[i]; + seq_printf(file, "%04d: 0x%08x 0x%08x", i, + rxd->vals[0], rxd->vals[1]); + + skb = READ_ONCE(rx_ring->rxbufs[i].skb); + if (skb) + seq_printf(file, " skb->head=%p skb->data=%p", + skb->head, skb->data); + + if (rx_ring->rxbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &rx_ring->rxbufs[i].dma_addr); + + if (i == rx_ring->rd_p % rxd_cnt) + seq_puts(file, " H_RD "); + if (i == rx_ring->wr_p % rxd_cnt) + seq_puts(file, " H_WR "); + if (i == fl_rd_p % rxd_cnt) + seq_puts(file, " FL_RD"); + if (i == fl_wr_p % rxd_cnt) + seq_puts(file, " FL_WR"); + if (i == rx_rd_p % rxd_cnt) + seq_puts(file, " RX_RD"); + if (i == rx_wr_p % rxd_cnt) + seq_puts(file, " RX_WR"); + + seq_putc(file, '\n'); + } +out: + rtnl_unlock(); + return 0; +} + +static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f) +{ + return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private); +} + +static const struct file_operations nfp_rx_q_fops = { + .owner = THIS_MODULE, + .open = nfp_net_debugfs_rx_q_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) +{ + struct nfp_net_tx_ring *tx_ring = file->private; + struct nfp_net_tx_desc *txd; + int d_rd_p, d_wr_p, txd_cnt; + struct sk_buff *skb; + struct nfp_net *nn; + int i; + + rtnl_lock(); + + if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net) + goto out; + nn = tx_ring->r_vec->nfp_net; + if (!netif_running(nn->netdev)) + goto out; + + txd_cnt = tx_ring->cnt; + + d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); + d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); + + seq_printf(file, "TX[%02d]: H_RD=%d 
H_WR=%d D_RD=%d D_WR=%d\n", + tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); + + for (i = 0; i < txd_cnt; i++) { + txd = &tx_ring->txds[i]; + seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i, + txd->vals[0], txd->vals[1], + txd->vals[2], txd->vals[3]); + + skb = READ_ONCE(tx_ring->txbufs[i].skb); + if (skb) + seq_printf(file, " skb->head=%p skb->data=%p", + skb->head, skb->data); + if (tx_ring->txbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &tx_ring->txbufs[i].dma_addr); + + if (i == tx_ring->rd_p % txd_cnt) + seq_puts(file, " H_RD"); + if (i == tx_ring->wr_p % txd_cnt) + seq_puts(file, " H_WR"); + if (i == d_rd_p % txd_cnt) + seq_puts(file, " D_RD"); + if (i == d_wr_p % txd_cnt) + seq_puts(file, " D_WR"); + + seq_putc(file, '\n'); + } +out: + rtnl_unlock(); + return 0; +} + +static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f) +{ + return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private); +} + +static const struct file_operations nfp_tx_q_fops = { + .owner = THIS_MODULE, + .open = nfp_net_debugfs_tx_q_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +void nfp_net_debugfs_adapter_add(struct nfp_net *nn) +{ + static struct dentry *queues, *tx, *rx; + char int_name[16]; + int i; + + if (IS_ERR_OR_NULL(nfp_dir)) + return; + + nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir); + if (IS_ERR_OR_NULL(nn->debugfs_dir)) + return; + + /* Create queue debugging sub-tree */ + queues = debugfs_create_dir("queue", nn->debugfs_dir); + if (IS_ERR_OR_NULL(nn->debugfs_dir)) + return; + + rx = debugfs_create_dir("rx", queues); + tx = debugfs_create_dir("tx", queues); + if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx)) + return; + + for (i = 0; i < nn->num_rx_rings; i++) { + sprintf(int_name, "%d", i); + debugfs_create_file(int_name, S_IRUSR, rx, + &nn->rx_rings[i], &nfp_rx_q_fops); + } + + for (i = 0; i < nn->num_tx_rings; i++) { + sprintf(int_name, "%d", i); + debugfs_create_file(int_name, S_IRUSR, tx, + &nn->tx_rings[i], &nfp_tx_q_fops); + } +} + +void nfp_net_debugfs_adapter_del(struct nfp_net *nn) +{ + debugfs_remove_recursive(nn->debugfs_dir); + nn->debugfs_dir = NULL; +} + +void nfp_net_debugfs_create(void) +{ + nfp_dir = debugfs_create_dir("nfp_net", NULL); +} + +void nfp_net_debugfs_destroy(void) +{ + debugfs_remove_recursive(nfp_dir); + nfp_dir = NULL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c new file mode 100644 index 000000000000..9a4084a68db5 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -0,0 +1,640 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_net_ethtool.c + * Netronome network device driver: ethtool support + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * Brad Petrus <brad.petrus@netronome.com> + */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/ethtool.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +/* Support for stats. Returns netdev, driver, and device stats */ +enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; +struct _nfp_net_et_stats { + char name[ETH_GSTRING_LEN]; + int type; + int sz; + int off; +}; + +#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS, \ + FIELD_SIZEOF(struct net_device_stats, m), \ + offsetof(struct net_device_stats, m) +/* For stats in the control BAR (other than Q stats) */ +#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS, \ + sizeof(u64), \ + (m) +static const struct _nfp_net_et_stats nfp_net_et_stats[] = { + /* netdev stats */ + {"rx_packets", NN_ET_NETDEV_STAT(rx_packets)}, + {"tx_packets", NN_ET_NETDEV_STAT(tx_packets)}, + {"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)}, + {"rx_errors", NN_ET_NETDEV_STAT(rx_errors)}, + {"tx_errors", NN_ET_NETDEV_STAT(tx_errors)}, + {"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)}, + {"multicast", NN_ET_NETDEV_STAT(multicast)}, + {"collisions", NN_ET_NETDEV_STAT(collisions)}, + {"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)}, + {"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)}, + /* Stats from the device */ + {"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)}, + {"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)}, + {"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)}, + {"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)}, + {"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)}, + {"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)}, + {"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)}, + {"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)}, + {"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)}, + + {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)}, + {"dev_tx_errors", 
NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)}, + {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)}, + {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)}, + {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)}, + {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)}, + {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)}, + {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)}, + {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)}, +}; + +#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) +#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3) +#define NN_ET_RVEC_GATHER_STATS 7 +#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2) +#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ + NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) + +static void nfp_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct nfp_net *nn = netdev_priv(netdev); + + strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, nfp_net_driver_version, + sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d.%d", + nn->fw_ver.resv, nn->fw_ver.class, + nn->fw_ver.major, nn->fw_ver.minor); + strlcpy(drvinfo->bus_info, pci_name(nn->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_stats = NN_ET_STATS_LEN; + drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; +} + +static void nfp_net_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nfp_net *nn = netdev_priv(netdev); + + ring->rx_max_pending = NFP_NET_MAX_RX_DESCS; + ring->tx_max_pending = NFP_NET_MAX_TX_DESCS; + ring->rx_pending = nn->rxd_cnt; + ring->tx_pending = nn->txd_cnt; +} + +static int nfp_net_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 rxd_cnt, txd_cnt; + + if (netif_running(netdev)) { + /* Some NIC drivers allow reconfiguration on the fly, + * some down the interface, change and then up it + * again. For now we don't allow changes when the + * device is up. + */ + nn_warn(nn, "Can't change rings while device is up\n"); + return -EBUSY; + } + + /* We don't have separate queues/rings for small/large frames. 
*/ + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + /* Round up to supported values */ + rxd_cnt = roundup_pow_of_two(ring->rx_pending); + rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS); + rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS); + + txd_cnt = roundup_pow_of_two(ring->tx_pending); + txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS); + txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS); + + if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt) + nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", + nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + + return 0; +} + +static void nfp_net_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { + memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < nn->num_r_vecs; i++) { + sprintf(p, "rvec_%u_rx_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rvec_%u_tx_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rvec_%u_tx_busy", i); + p += ETH_GSTRING_LEN; + } + strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "tx_gather", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + strncpy(p, "tx_lso", ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + for (i = 0; i < nn->num_tx_rings; i++) { + sprintf(p, "txq_%u_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "txq_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < nn->num_rx_rings; i++) { + sprintf(p, "rxq_%u_pkts", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rxq_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void nfp_net_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; + struct nfp_net *nn = netdev_priv(netdev); + struct rtnl_link_stats64 *netdev_stats; + struct rtnl_link_stats64 temp = {}; + u64 tmp[NN_ET_RVEC_GATHER_STATS]; + u8 __iomem *io_p; + int i, j, k; + u8 *p; + + netdev_stats = dev_get_stats(netdev, &temp); + + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { + switch (nfp_net_et_stats[i].type) { + case NETDEV_ET_STATS: + p = (char *)netdev_stats + nfp_net_et_stats[i].off; + data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ? 
+ *(u64 *)p : *(u32 *)p; + break; + + case NFP_NET_DEV_ET_STATS: + io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; + data[i] = readq(io_p); + break; + } + } + for (j = 0; j < nn->num_r_vecs; j++) { + unsigned int start; + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); + data[i++] = nn->r_vecs[j].rx_pkts; + tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; + tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; + tmp[2] = nn->r_vecs[j].hw_csum_rx_error; + } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); + + do { + start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); + data[i++] = nn->r_vecs[j].tx_pkts; + data[i++] = nn->r_vecs[j].tx_busy; + tmp[3] = nn->r_vecs[j].hw_csum_tx; + tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; + tmp[5] = nn->r_vecs[j].tx_gather; + tmp[6] = nn->r_vecs[j].tx_lso; + } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); + + for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) + gathered_stats[k] += tmp[k]; + } + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + data[i++] = gathered_stats[j]; + for (j = 0; j < nn->num_tx_rings; j++) { + io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); + data[i++] = readq(io_p); + io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; + data[i++] = readq(io_p); + } + for (j = 0; j < nn->num_rx_rings; j++) { + io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); + data[i++] = readq(io_p); + io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; + data[i++] = readq(io_p); + } +} + +static int nfp_net_get_sset_count(struct net_device *netdev, int sset) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + return NN_ET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +/* RX network flow classification (RSS, filters, etc) + */ +static u32 ethtool_flow_to_nfp_flag(u32 flow_type) +{ + static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = { + [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP, + [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP, + [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP, + [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP, + [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4, + [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6, + }; + + if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp)) + return 0; + + return xlate_ethtool_to_nfp[flow_type]; +} + +static int nfp_net_get_rss_hash_opts(struct nfp_net *nn, + struct ethtool_rxnfc *cmd) +{ + u32 nfp_rss_flag; + + cmd->data = 0; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type); + if (!nfp_rss_flag) + return -EINVAL; + + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (nn->rss_cfg & nfp_rss_flag) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + return 0; +} + +static int nfp_net_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nn->num_rx_rings; + return 0; + case ETHTOOL_GRXFH: + return nfp_net_get_rss_hash_opts(nn, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int nfp_net_set_rss_hash_opt(struct nfp_net *nn, + struct ethtool_rxnfc *nfc) +{ + u32 new_rss_cfg = nn->rss_cfg; + u32 nfp_rss_flag; + int err; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + /* RSS only supports IP SA/DA and L4 src/dst ports */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SA/DA fields for hashing */ + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + nfp_rss_flag = 
ethtool_flow_to_nfp_flag(nfc->flow_type); + if (!nfp_rss_flag) + return -EINVAL; + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + new_rss_cfg &= ~nfp_rss_flag; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + new_rss_cfg |= nfp_rss_flag; + break; + default: + return -EINVAL; + } + + new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ; + new_rss_cfg |= NFP_NET_CFG_RSS_MASK; + + if (new_rss_cfg == nn->rss_cfg) + return 0; + + writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL); + err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); + if (err) + return err; + + nn->rss_cfg = new_rss_cfg; + + nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg); + return 0; +} + +static int nfp_net_set_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return nfp_net_set_rss_hash_opt(nn, cmd); + default: + return -EOPNOTSUPP; + } +} + +static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return 0; + + return ARRAY_SIZE(nn->rss_itbl); +} + +static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) +{ + return NFP_NET_CFG_RSS_KEY_SZ; +} + +static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + if (indir) + for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) + indir[i] = nn->rss_itbl[i]; + if (key) + memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int nfp_net_set_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) || + !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!key && !indir) + return 0; + + if (key) { + memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ); + nfp_net_rss_write_key(nn); + } + if (indir) { + for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) + nn->rss_itbl[i] = indir[i]; + + nfp_net_rss_write_itbl(nn); + } + + return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); +} + +/* Dump BAR registers + */ +static int nfp_net_get_regs_len(struct net_device *netdev) +{ + return NFP_NET_CFG_BAR_SZ; +} + +static void nfp_net_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nfp_net *nn = netdev_priv(netdev); + u32 *regs_buf = p; + int i; + + regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); + + for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++) + regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32))); +} + +static int nfp_net_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) + return -EINVAL; + + ec->rx_coalesce_usecs = nn->rx_coalesce_usecs; + ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames; + ec->tx_coalesce_usecs = nn->tx_coalesce_usecs; + ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames; + + return 0; +} + +static int nfp_net_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct nfp_net *nn = netdev_priv(netdev); + unsigned int factor; + + if (ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + 
ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->me_freq_mhz / 16; + + /* Each pair of (usecs, max_frames) fields specifies that interrupts + * should be coalesced until + * (usecs > 0 && time_since_first_completion >= usecs) || + * (max_frames > 0 && completed_frames >= max_frames) + * + * It is illegal to set both usecs and max_frames to zero as this would + * cause interrupts to never be generated. To disable coalescing, set + * usecs = 0 and max_frames = 1. + * + * Some implementations ignore the value of max_frames and use the + * condition time_since_first_completion >= usecs + */ + + if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) + return -EINVAL; + + /* ensure valid configuration */ + if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) + return -EINVAL; + + if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) + return -EINVAL; + + if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1)) + return -EINVAL; + + if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1)) + return -EINVAL; + + /* configuration is valid */ + nn->rx_coalesce_usecs = ec->rx_coalesce_usecs; + nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames; + nn->tx_coalesce_usecs = ec->tx_coalesce_usecs; + nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames; + + /* write configuration to device */ + nfp_net_coalesce_write_cfg(nn); + return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); +} + +static const struct ethtool_ops nfp_net_ethtool_ops = { + .get_drvinfo = nfp_net_get_drvinfo, + .get_ringparam = nfp_net_get_ringparam, + .set_ringparam = nfp_net_set_ringparam, + .get_strings = nfp_net_get_strings, + .get_ethtool_stats = nfp_net_get_stats, + .get_sset_count = nfp_net_get_sset_count, + .get_rxnfc = nfp_net_get_rxnfc, + .set_rxnfc = nfp_net_set_rxnfc, + .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size, + .get_rxfh_key_size = nfp_net_get_rxfh_key_size, + .get_rxfh = nfp_net_get_rxfh, + .set_rxfh = nfp_net_set_rxfh, + .get_regs_len = nfp_net_get_regs_len, + .get_regs = nfp_net_get_regs, + .get_coalesce = nfp_net_get_coalesce, + .set_coalesce = nfp_net_set_coalesce, +}; + +void nfp_net_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nfp_net_ethtool_ops; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c new file mode 100644 index 000000000000..e2b22b8a20f1 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -0,0 +1,385 @@ +/* + * Copyright (C) 2015 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. 
You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * nfp_netvf_main.c + * Netronome virtual function network device driver: Main entry point + * Author: Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/etherdevice.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +const char nfp_net_driver_name[] = "nfp_netvf"; +const char nfp_net_driver_version[] = "0.1"; +#define PCI_DEVICE_NFP6000VF 0x6003 +static const struct pci_device_id nfp_netvf_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, + { 0, } /* Required last entry. */ +}; +MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids); + +static void nfp_netvf_get_mac_addr(struct nfp_net *nn) +{ + u8 mac_addr[ETH_ALEN]; + + put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]); + /* We can't do readw for NFP-3200 compatibility */ + put_unaligned_be16(nn_readl(nn, NFP_NET_CFG_MACADDR + 4) >> 16, + &mac_addr[4]); + + if (!is_valid_ether_addr(mac_addr)) { + eth_hw_addr_random(nn->netdev); + return; + } + + ether_addr_copy(nn->netdev->dev_addr, mac_addr); + ether_addr_copy(nn->netdev->perm_addr, mac_addr); +} + +static int nfp_netvf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct nfp_net_fw_version fw_ver; + int max_tx_rings, max_rx_rings; + u32 tx_bar_off, rx_bar_off; + u32 tx_bar_sz, rx_bar_sz; + int tx_bar_no, rx_bar_no; + u8 __iomem *ctrl_bar; + struct nfp_net *nn; + int is_nfp3200; + u32 startq; + int stride; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, nfp_net_driver_name); + if (err) { + dev_err(&pdev->dev, "Unable to allocate device memory.\n"); + goto err_pci_disable; + } + + switch (pdev->device) { + case PCI_DEVICE_NFP6000VF: + is_nfp3200 = 0; + break; + default: + err = -ENODEV; + goto err_pci_regions; + } + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); + if (err) + goto err_pci_regions; + + /* Map the Control BAR. + * + * Irrespective of the advertised BAR size we only map the + * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code + * the identical for PF and VF drivers. 
+ */ + ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR), + NFP_NET_CFG_BAR_SZ); + if (!ctrl_bar) { + dev_err(&pdev->dev, + "Failed to map resource %d\n", NFP_NET_CRTL_BAR); + err = -EIO; + goto err_pci_regions; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || + nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) || + nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) { + stride = 2; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = NFP_NET_Q1_BAR; + dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 3: + if (is_nfp3200) { + stride = 2; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = NFP_NET_Q1_BAR; + } else { + stride = 4; + tx_bar_no = NFP_NET_Q0_BAR; + rx_bar_no = tx_bar_no; + } + break; + default: + dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.resv, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto err_ctrl_unmap; + } + } + + /* Find out how many rings are supported */ + max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); + max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); + + tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride; + rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride; + + /* Sanity checks */ + if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) { + dev_err(&pdev->dev, + "TX BAR too small for number of TX rings. Adjusting\n"); + tx_bar_sz = pci_resource_len(pdev, tx_bar_no); + max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; + } + if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) { + dev_err(&pdev->dev, + "RX BAR too small for number of RX rings. Adjusting\n"); + rx_bar_sz = pci_resource_len(pdev, rx_bar_no); + max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; + } + + /* XXX Implement a workaround for THB-350 here. Ideally, we + * have a different PCI ID for A rev VFs. 
+ */ + switch (pdev->device) { + case PCI_DEVICE_NFP6000VF: + startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); + tx_bar_off = NFP_PCIE_QUEUE(startq); + startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); + rx_bar_off = NFP_PCIE_QUEUE(startq); + break; + default: + err = -ENODEV; + goto err_ctrl_unmap; + } + + /* Allocate and initialise the netdev */ + nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_ctrl_unmap; + } + + nn->fw_ver = fw_ver; + nn->ctrl_bar = ctrl_bar; + nn->is_vf = 1; + nn->is_nfp3200 = is_nfp3200; + nn->stride_tx = stride; + nn->stride_rx = stride; + + if (rx_bar_no == tx_bar_no) { + u32 bar_off, bar_sz; + resource_size_t map_addr; + + /* Make a single overlapping BAR mapping */ + if (tx_bar_off < rx_bar_off) + bar_off = tx_bar_off; + else + bar_off = rx_bar_off; + + if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz)) + bar_sz = (tx_bar_off + tx_bar_sz) - bar_off; + else + bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; + + map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; + nn->q_bar = ioremap_nocache(map_addr, bar_sz); + if (!nn->q_bar) { + nn_err(nn, "Failed to map resource %d\n", tx_bar_no); + err = -EIO; + goto err_netdev_free; + } + + /* TX queues */ + nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off); + /* RX queues */ + nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off); + } else { + resource_size_t map_addr; + + /* TX queues */ + map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; + nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); + if (!nn->tx_bar) { + nn_err(nn, "Failed to map resource %d\n", tx_bar_no); + err = -EIO; + goto err_netdev_free; + } + + /* RX queues */ + map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; + nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); + if (!nn->rx_bar) { + nn_err(nn, "Failed to map resource %d\n", rx_bar_no); + err = -EIO; + goto err_unmap_tx; + } + } + + nfp_netvf_get_mac_addr(nn); + + err = nfp_net_irqs_alloc(nn); + if (!err) { + nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); + err = -EIO; + goto err_unmap_rx; + } + + /* Get ME clock frequency from ctrl BAR + * XXX for now frequency is hardcoded until we figure out how + * to get the value from nfp-hwinfo into ctrl bar + */ + nn->me_freq_mhz = 1200; + + err = nfp_net_netdev_init(nn->netdev); + if (err) + goto err_irqs_disable; + + pci_set_drvdata(pdev, nn); + + nfp_net_info(nn); + nfp_net_debugfs_adapter_add(nn); + + return 0; + +err_irqs_disable: + nfp_net_irqs_disable(nn); +err_unmap_rx: + if (!nn->q_bar) + iounmap(nn->rx_bar); +err_unmap_tx: + if (!nn->q_bar) + iounmap(nn->tx_bar); + else + iounmap(nn->q_bar); +err_netdev_free: + pci_set_drvdata(pdev, NULL); + nfp_net_netdev_free(nn); +err_ctrl_unmap: + iounmap(ctrl_bar); +err_pci_regions: + pci_release_regions(pdev); +err_pci_disable: + pci_disable_device(pdev); + return err; +} + +static void nfp_netvf_pci_remove(struct pci_dev *pdev) +{ + struct nfp_net *nn = pci_get_drvdata(pdev); + + /* Note, the order is slightly different from above as we need + * to keep the nn pointer around till we have freed everything. 
+ */ + nfp_net_debugfs_adapter_del(nn); + + nfp_net_netdev_clean(nn->netdev); + + nfp_net_irqs_disable(nn); + + if (!nn->q_bar) { + iounmap(nn->rx_bar); + iounmap(nn->tx_bar); + } else { + iounmap(nn->q_bar); + } + iounmap(nn->ctrl_bar); + + pci_set_drvdata(pdev, NULL); + + nfp_net_netdev_free(nn); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nfp_netvf_pci_driver = { + .name = nfp_net_driver_name, + .id_table = nfp_netvf_pci_device_ids, + .probe = nfp_netvf_pci_probe, + .remove = nfp_netvf_pci_remove, +}; + +static int __init nfp_netvf_init(void) +{ + int err; + + pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n", + nfp_net_driver_name); + + nfp_net_debugfs_create(); + err = pci_register_driver(&nfp_netvf_pci_driver); + if (err) { + nfp_net_debugfs_destroy(); + return err; + } + + return 0; +} + +static void __exit nfp_netvf_exit(void) +{ + pci_unregister_driver(&nfp_netvf_pci_driver); + nfp_net_debugfs_destroy(); +} + +module_init(nfp_netvf_init); +module_exit(nfp_netvf_exit); + +MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("NFP VF network device driver"); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 057665180f13..b1ce7aaa8f8b 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -797,7 +797,7 @@ static int lpc_mii_probe(struct net_device *ndev) netdev_info(ndev, "using MII interface\n"); else netdev_info(ndev, "using RMII interface\n"); - phydev = phy_connect(ndev, dev_name(&phydev->dev), + phydev = phy_connect(ndev, phydev_name(phydev), &lpc_handle_link_change, lpc_phy_interface_mode(&pldat->pdev->dev)); @@ -816,15 +816,14 @@ static int lpc_mii_probe(struct net_device *ndev) pldat->duplex = -1; pldat->phy_dev = phydev; - netdev_info(ndev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); + return 0; } static int lpc_mii_init(struct netdata_local *pldat) { - int err = -ENXIO, i; + int err = -ENXIO; pldat->mii_bus = mdiobus_alloc(); if (!pldat->mii_bus) { @@ -851,19 +850,10 @@ static int lpc_mii_init(struct netdata_local *pldat) pldat->mii_bus->priv = pldat; pldat->mii_bus->parent = &pldat->pdev->dev; - pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!pldat->mii_bus->irq) { - err = -ENOMEM; - goto err_out_1; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - pldat->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(pldat->pdev, pldat->mii_bus); if (mdiobus_register(pldat->mii_bus)) - goto err_out_free_mdio_irq; + goto err_out_unregister_bus; if (lpc_mii_probe(pldat->ndev) != 0) goto err_out_unregister_bus; @@ -872,9 +862,6 @@ static int lpc_mii_init(struct netdata_local *pldat) err_out_unregister_bus: mdiobus_unregister(pldat->mii_bus); -err_out_free_mdio_irq: - kfree(pldat->mii_bus->irq); -err_out_1: mdiobus_free(pldat->mii_bus); err_out: return err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 08d4be616064..e097e6baaac4 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -500,7 +500,7 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) val = XsumTX; pch_gbe_validate_option(&val, &opt, adapter); if (!val) - dev->features &= ~NETIF_F_ALL_CSUM; + dev->features &= 
~NETIF_F_CSUM_MASK; } { /* Flow Control */ static const struct pch_gbe_option opt = { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b2f8e854dfd1..264e954675d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3993,6 +3993,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -4044,6 +4046,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 #define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 174f7341c5c3..9d76ce249277 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1115,6 +1115,23 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_set_led(hwfn, ptt, mode); + + qed_ptt_release(hwfn, ptt); + + return status; +} + const struct qed_common_ops qed_common_ops_pass = { .probe = &qed_probe, .remove = &qed_remove, @@ -1135,6 +1152,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .set_led = &qed_set_led, }; u32 qed_get_protocol_version(enum qed_protocol protocol) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 20d048cdcb88..ba1b1f1ef789 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -858,3 +858,30 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return 0; } + +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_led_mode mode) +{ + u32 resp = 0, param = 0, drv_mb_param; + int rc; + + switch (mode) { + case QED_LED_MODE_ON: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; + break; + case QED_LED_MODE_OFF: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; + break; + case QED_LED_MODE_RESTORE: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; + break; + default: + DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, + drv_mb_param, &resp, ¶m); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dbaae586b4a7..506197d5c3dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -224,6 +224,19 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver); +/** + * @brief Set LED status + * + * @param p_hwfn + * @param p_ptt + * @param mode - LED mode + * + * @return int - 0 - operation was successful. 
+ */ +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_led_mode mode); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index ea00d5f3bab4..7c6caf7f6612 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -116,6 +116,7 @@ struct qede_dev { (edev)->dev_info.num_tc) struct qede_fastpath *fp_array; + u16 req_rss; u16 num_rss; u8 num_tc; #define QEDE_RSS_CNT(edev) ((edev)->num_rss) @@ -269,13 +270,13 @@ int qede_change_mtu(struct net_device *dev, int new_mtu); void qede_fill_by_demand_stats(struct qede_dev *edev); #define RX_RING_SIZE_POW 13 -#define RX_RING_SIZE BIT(RX_RING_SIZE_POW) +#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 #define NUM_RX_BDS_DEF NUM_RX_BDS_MAX #define TX_RING_SIZE_POW 13 -#define TX_RING_SIZE BIT(TX_RING_SIZE_POW) +#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 3a362476a22c..e442b85c9a5e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -322,6 +322,30 @@ static void qede_set_msglevel(struct net_device *ndev, u32 level) dp_module, dp_level); } +static int qede_nway_reset(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params link_params; + + if (!netif_running(dev)) + return 0; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + if (!current_link.link_up) + return 0; + + /* Toggle the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); + link_params.link_up = true; + edev->ops->common->set_link(edev->cdev, &link_params); + + return 0; +} + static u32 qede_get_link(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); @@ -333,6 +357,106 @@ static u32 qede_get_link(struct net_device *dev) return current_link.link_up; } +static void qede_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct qede_dev *edev = netdev_priv(dev); + + ering->rx_max_pending = NUM_RX_BDS_MAX; + ering->rx_pending = edev->q_num_rx_buffers; + ering->tx_max_pending = NUM_TX_BDS_MAX; + ering->tx_pending = edev->q_num_tx_buffers; +} + +static int qede_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n", + ering->rx_pending, ering->tx_pending); + + /* Validate legality of configuration */ + if (ering->rx_pending > NUM_RX_BDS_MAX || + ering->rx_pending < NUM_RX_BDS_MIN || + ering->tx_pending > NUM_TX_BDS_MAX || + ering->tx_pending < NUM_TX_BDS_MIN) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n", + NUM_RX_BDS_MIN, NUM_RX_BDS_MAX, + NUM_TX_BDS_MIN, NUM_TX_BDS_MAX); + return 
-EINVAL; + } + + /* Change ring size and re-load */ + edev->q_num_rx_buffers = ering->rx_pending; + edev->q_num_tx_buffers = ering->tx_pending; + + if (netif_running(edev->ndev)) + qede_reload(edev, NULL, NULL); + + return 0; +} + +static void qede_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + epause->autoneg = true; + if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) + epause->rx_pause = true; + if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) + epause->tx_pause = true; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, + epause->tx_pause); +} + +static int qede_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_params params; + struct qed_link_output current_link; + + if (!edev->dev_info.common.is_mf) { + DP_INFO(edev, + "Pause parameters can not be updated in non-default mode\n"); + return -EOPNOTSUPP; + } + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (epause->autoneg) { + if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + DP_INFO(edev, "autoneg not supported\n"); + return -EINVAL; + } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + } + if (epause->rx_pause) + params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; + if (epause->tx_pause) + params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, ¶ms); + + return 0; +} + static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args) { edev->ndev->mtu = args->mtu; @@ -366,17 +490,104 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu) return 0; } +static void qede_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qede_dev *edev = netdev_priv(dev); + + channels->max_combined = QEDE_MAX_RSS_CNT(edev); + channels->combined_count = QEDE_RSS_CNT(edev); +} + +static int qede_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", + channels->rx_count, channels->tx_count, + channels->other_count, channels->combined_count); + + /* We don't support separate rx / tx, nor `other' channels. 
*/ + if (channels->rx_count || channels->tx_count || + channels->other_count || (channels->combined_count == 0) || + (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "command parameters not supported\n"); + return -EINVAL; + } + + /* Check if there was a change in the active parameters */ + if (channels->combined_count == QEDE_RSS_CNT(edev)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "No change in active parameters\n"); + return 0; + } + + /* We need the number of queues to be divisible between the hwfns */ + if (channels->combined_count % edev->dev_info.common.num_hwfns) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Number of channels must be divisable by %04x\n", + edev->dev_info.common.num_hwfns); + return -EINVAL; + } + + /* Set number of queues and reload if necessary */ + edev->req_rss = channels->combined_count; + if (netif_running(dev)) + qede_reload(edev, NULL, NULL); + + return 0; +} + +static int qede_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct qede_dev *edev = netdev_priv(dev); + u8 led_state = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + led_state = QED_LED_MODE_ON; + break; + + case ETHTOOL_ID_OFF: + led_state = QED_LED_MODE_OFF; + break; + + case ETHTOOL_ID_INACTIVE: + led_state = QED_LED_MODE_RESTORE; + break; + } + + edev->ops->common->set_led(edev->cdev, led_state); + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, + .nway_reset = qede_nway_reset, .get_link = qede_get_link, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_pauseparam = qede_get_pauseparam, + .set_pauseparam = qede_set_pauseparam, .get_strings = qede_get_strings, + .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, .get_sset_count = qede_get_sset_count, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, }; void qede_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f4657a2e730a..6237f10b5119 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1502,8 +1502,11 @@ static int qede_set_num_queues(struct qede_dev *edev) u16 rss_num; /* Setup queues according to possible resources*/ - rss_num = netif_get_num_default_rss_queues() * - edev->dev_info.common.num_hwfns; + if (edev->req_rss) + rss_num = edev->req_rss; + else + rss_num = netif_get_num_default_rss_queues() * + edev->dev_info.common.num_hwfns; rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index a72bcddf160a..4b76c69fe86d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -167,7 +167,7 @@ struct qlcnic_dcb_cfg { u32 version; }; -static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { +static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, @@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .aen_handler = 
qlcnic_83xx_dcb_aen_handler, }; -static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { +static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 3cf4a10fbe1e..9777e5713525 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -37,7 +37,7 @@ struct qlcnic_dcb { struct qlcnic_adapter *adapter; struct delayed_work aen_work; struct workqueue_struct *wq; - struct qlcnic_dcb_ops *ops; + const struct qlcnic_dcb_ops *ops; struct qlcnic_dcb_cfg *cfg; unsigned long state; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index d4b5085a21fa..7bd6f25b4625 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1604,7 +1604,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; - netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, + netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, NAPI_POLL_WEIGHT); } } @@ -2135,7 +2135,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; - netif_napi_add(netdev, &tx_ring->napi, + netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_83xx_msix_tx_poll, NAPI_POLL_WEIGHT); } diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 9a37247cf4b8..6b541e57c96a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1039,7 +1039,7 @@ static int r6040_mii_probe(struct net_device *dev) return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, + phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1061,9 +1061,7 @@ static int r6040_mii_probe(struct net_device *dev) lp->old_link = 0; lp->old_duplex = -1; - dev_info(&lp->pdev->dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); + phy_attached_info(phydev); return 0; } @@ -1077,7 +1075,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; int bar = 0; u16 *adrp; - int i; pr_info("%s\n", version); @@ -1189,19 +1186,11 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lp->mii_bus->name = "r6040_eth_mii"; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dev_name(&pdev->dev), card_idx); - lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_mdio; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(lp->mii_bus); if (err) { dev_err(&pdev->dev, "failed to register MII bus\n"); - goto err_out_mdio_irq; + goto err_out_mdio; } err = r6040_mii_probe(dev); @@ -1220,8 +1209,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); -err_out_mdio_irq: - kfree(lp->mii_bus->irq); err_out_mdio: mdiobus_free(lp->mii_bus); 
err_out_unmap: @@ -1244,7 +1231,6 @@ static void r6040_remove_one(struct pci_dev *pdev) unregister_netdev(dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); pci_iounmap(pdev, lp->base); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 79ef799f88ab..17d5571d0432 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3894,7 +3894,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) /* disable phy pfm mode */ rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); /* Check ALDPS bit, disable it if enabled */ @@ -3947,7 +3947,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || - (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) { + (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) { rtl_writephy(tp, 0x1f, 0x0bcf); rtl_writephy(tp, 0x16, data); rtl_writephy(tp, 0x1f, 0x0000); @@ -3967,7 +3967,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) /* disable phy pfm mode */ rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); /* Check ALDPS bit, disable it if enabled */ @@ -5818,11 +5818,10 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168d_4[] = { - { 0x0b, ~0, 0x48 }, - { 0x19, 0x20, 0x50 }, - { 0x0c, ~0, 0x20 } + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 } }; - int i; rtl_csi_access_enable_1(tp); @@ -5830,13 +5829,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) RTL_W8(MaxTxPacketSize, TxPacketMax); - for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { - const struct ephy_info *e = e_info_8168d_4 + i; - u16 w; - - w = rtl_ephy_read(tp, e->offset); - rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); - } + rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4)); rtl_enable_clock_request(pdev); } @@ -6127,7 +6120,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); @@ -6136,7 +6129,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_pcie_state_l2l3_enable(tp, false); rtl_writephy(tp, 0x1f, 0x0c42); - rg_saw_cnt = rtl_readphy(tp, 0x13); + rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff); rtl_writephy(tp, 0x1f, 0x0000); if (rg_saw_cnt > 0) { u16 sw_cnt_1ms_ini; @@ -6252,7 +6245,7 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) rtl_hw_start_8168ep(tp); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -6274,7 +6267,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) rtl_hw_start_8168ep(tp); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); - RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); data = r8168_mac_ocp_read(tp, 0xd3e2); data &= 
0xf000; diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 0623fff932e4..9fbe92ac225b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -206,6 +206,7 @@ enum CCC_BIT { CCC_OPC_RESET = 0x00000000, CCC_OPC_CONFIG = 0x00000001, CCC_OPC_OPERATION = 0x00000002, + CCC_GAC = 0x00000080, CCC_DTSR = 0x00000100, CCC_CSEL = 0x00030000, CCC_CSEL_HPB = 0x00010000, @@ -576,6 +577,9 @@ enum GTI_BIT { GTI_TIV = 0x0FFFFFFF, }; +#define GTI_TIV_MAX GTI_TIV +#define GTI_TIV_MIN 0x20 + /* GIC */ enum GIC_BIT { GIC_PTCE = 0x00000001, /* Undocumented? */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 467d41698fd5..ac43ed914fcf 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -32,6 +32,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include <asm/div64.h> + #include "ravb.h" #define RAVB_DEF_MSG_ENABLE \ @@ -113,12 +115,15 @@ static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) if (mac) { ether_addr_copy(ndev->dev_addr, mac); } else { - ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24); - ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF; - ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF; - ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF; - ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF; - ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF; + u32 mahr = ravb_read(ndev, MAHR); + u32 malr = ravb_read(ndev, MALR); + + ndev->dev_addr[0] = (mahr >> 24) & 0xFF; + ndev->dev_addr[1] = (mahr >> 16) & 0xFF; + ndev->dev_addr[2] = (mahr >> 8) & 0xFF; + ndev->dev_addr[3] = (mahr >> 0) & 0xFF; + ndev->dev_addr[4] = (malr >> 8) & 0xFF; + ndev->dev_addr[5] = (malr >> 0) & 0xFF; } } @@ -338,16 +343,13 @@ error: static void ravb_emac_init(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr; /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); /* PAUSE prohibition */ - ecmr = ravb_read(ndev, ECMR); - ecmr &= ECMR_DM; - ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - ravb_write(ndev, ecmr, ECMR); + ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); ravb_set_rate(ndev); @@ -405,9 +407,11 @@ static int ravb_dmac_init(struct net_device *ndev) /* Timestamp enable */ ravb_write(ndev, TCCR_TFEN, TCCR); - /* Interrupt enable: */ + /* Interrupt init: */ /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0, RIC1); /* Receive FIFO full error, descriptor empty */ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ @@ -875,6 +879,7 @@ static int ravb_phy_init(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev; struct device_node *pn; + int err; priv->link = 0; priv->speed = 0; @@ -882,6 +887,17 @@ static int ravb_phy_init(struct net_device *ndev) /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); + if (!pn) { + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + return err; + } + pn = of_node_get(np); + } phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, priv->phy_interface); if (!phydev) { @@ -908,8 +924,7 @@ static int ravb_phy_init(struct net_device *ndev) /* 10BASE is not supported */ phydev->supported &= ~PHY_10BT_FEATURES; - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + phy_attached_info(phydev); priv->phydev = phydev; @@ -1232,7 +1247,8 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - ravb_ptp_init(ndev, priv->pdev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1245,7 +1261,8 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); out_free_irq2: if (priv->chip_id == RCAR_GEN3) free_irq(priv->emac_irq, ndev); @@ -1474,12 +1491,12 @@ static int ravb_close(struct net_device *ndev) /* Disable interrupts by clearing the interrupt masks. */ ravb_write(ndev, 0, RIC0); - ravb_write(ndev, 0, RIC1); ravb_write(ndev, 0, RIC2); ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - ravb_ptp_stop(ndev); + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ if (ravb_stop_dma(ndev) < 0) @@ -1659,11 +1676,45 @@ static int ravb_mdio_release(struct ravb_private *priv) static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); +static int ravb_set_gti(struct net_device *ndev) +{ + + struct device *dev = ndev->dev.parent; + struct device_node *np = dev->of_node; + unsigned long rate; + struct clk *clk; + uint64_t inc; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + dev_err(dev, "could not get clock\n"); + return PTR_ERR(clk); + } + + rate = clk_get_rate(clk); + clk_put(clk); + + inc = 1000000000ULL << 20; + do_div(inc, rate); + + if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { + dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", + inc, GTI_TIV_MIN, GTI_TIV_MAX); + return -EINVAL; + } + + ravb_write(ndev, inc, GTI); + + return 0; +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1752,15 +1803,25 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, - CCC); + if (chip_id == RCAR_GEN2) { + ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | + CCC_OPC_CONFIG, CCC); + /* Set CSEL value */ + ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | + CCC_CSEL_HPB, CCC); + } else { + ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | + CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); + } /* Set CSEL value */ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, CCC); /* Set GTI value */ - ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI); + error = ravb_set_gti(ndev); + if 
(error) + goto out_release; /* Request GTI loading */ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); @@ -1783,6 +1844,10 @@ static int ravb_probe(struct platform_device *pdev) /* Initialise HW timestamp list */ INIT_LIST_HEAD(&priv->ts_skb_list); + /* Initialise PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_init(ndev, pdev); + /* Debug message level */ priv->msg_enable = RAVB_DEF_MSG_ENABLE; @@ -1824,6 +1889,10 @@ out_napi_del: out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); + + /* Stop PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); out_release: if (ndev) free_netdev(ndev); @@ -1838,6 +1907,10 @@ static int ravb_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); + /* Stop PTP Clock driver */ + if (priv->chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); /* Set reset mode */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6a8fc0f341ff..dfa9e59c9442 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -473,6 +473,109 @@ static void sh_eth_set_duplex(struct net_device *ndev) sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); +} + +static void sh_eth_set_rate_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + default: + break; + } +} + +#ifdef CONFIG_OF +/* R7S72100 */ +static struct sh_eth_cpu_data r7s72100_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + + .register_type = SH_ETH_REG_FAST_RZ, + + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = 0xff7f009f, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .no_psr = 1, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .hw_crc = 1, + .tsu = 1, + .shift_rd0 = 1, +}; + +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); + + sh_eth_select_mii(ndev); +} + +/* R8A7740 */ +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + 
.tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .select_mii = 1, + .shift_rd0 = 1, +}; + /* There is CPU dependent code */ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { @@ -538,6 +641,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .hw_swap = 1, .rmiimode = 1, }; +#endif /* CONFIG_OF */ static void sh_eth_set_rate_sh7724(struct net_device *ndev) { @@ -695,34 +799,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .tsu = 1, }; -static void sh_eth_chip_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); - mdelay(1); -} - -static void sh_eth_set_rate_gether(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } -} - /* SH7734 */ static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, @@ -780,80 +856,6 @@ static struct sh_eth_cpu_data sh7763_data = { .irq_flags = IRQF_SHARED, }; -static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); - mdelay(1); - - sh_eth_select_mii(ndev); -} - -/* R8A7740 */ -static struct sh_eth_cpu_data r8a7740_data = { - .chip_reset = sh_eth_chip_reset_r8a7740, - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate_gether, - - .register_type = SH_ETH_REG_GIGABIT, - - .ecsr_value = ECSR_ICD | ECSR_MPD, - .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, - - .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, - .fdr_value = 0x0000070f, - - .apr = 1, - .mpr = 1, - .tpauser = 1, - .bculr = 1, - .hw_swap = 1, - .rpadir = 1, - .rpadir_value = 2 << 16, - .no_trimd = 1, - .no_ade = 1, - .tsu = 1, - .select_mii = 1, - .shift_rd0 = 1, -}; - -/* R7S72100 */ -static struct sh_eth_cpu_data r7s72100_data = { - .chip_reset = sh_eth_chip_reset, - .set_duplex = sh_eth_set_duplex, - - .register_type = SH_ETH_REG_FAST_RZ, - - .ecsr_value = ECSR_ICD, - .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xff7f009f, - - .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, - .fdr_value = 0x0000070f, - - .no_psr = 1, - .apr = 1, - .mpr = 1, - .tpauser = 1, - .hw_swap = 1, - .rpadir = 1, - .rpadir_value = 2 << 16, - .no_trimd = 1, - .no_ade = 1, - .hw_crc = 1, - .tsu = 1, - .shift_rd0 = 1, -}; - static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, @@ -965,30 +967,6 @@ static void sh_eth_set_receive_align(struct sk_buff *skb) skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); } - -/* CPU <-> EDMAC endian convert */ -static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) -{ - switch (mdp->edmac_endian) { - case EDMAC_LITTLE_ENDIAN: - return cpu_to_le32(x); - case EDMAC_BIG_ENDIAN: - return cpu_to_be32(x); - } - return x; -} - -static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) -{ - 
switch (mdp->edmac_endian) { - case EDMAC_LITTLE_ENDIAN: - return le32_to_cpu(x); - case EDMAC_BIG_ENDIAN: - return be32_to_cpu(x); - } - return x; -} - /* Program the hardware MAC address from dev->dev_addr. */ static void update_mac_address(struct net_device *ndev) { @@ -1011,12 +989,15 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac) if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { memcpy(ndev->dev_addr, mac, ETH_ALEN); } else { - ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); - ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; - ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; - ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); - ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; - ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); + u32 mahr = sh_eth_read(ndev, MAHR); + u32 malr = sh_eth_read(ndev, MALR); + + ndev->dev_addr[0] = (mahr >> 24) & 0xFF; + ndev->dev_addr[1] = (mahr >> 16) & 0xFF; + ndev->dev_addr[2] = (mahr >> 8) & 0xFF; + ndev->dev_addr[3] = (mahr >> 0) & 0xFF; + ndev->dev_addr[4] = (malr >> 8) & 0xFF; + ndev->dev_addr[5] = (malr >> 0) & 0xFF; } } @@ -1032,56 +1013,34 @@ struct bb_info { void (*set_gate)(void *addr); struct mdiobb_ctrl ctrl; void *addr; - u32 mmd_msk;/* MMD */ - u32 mdo_msk; - u32 mdi_msk; - u32 mdc_msk; }; -/* PHY bit set */ -static void bb_set(void *addr, u32 msk) +static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) { - iowrite32(ioread32(addr) | msk, addr); -} + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + u32 pir; -/* PHY bit clear */ -static void bb_clr(void *addr, u32 msk) -{ - iowrite32((ioread32(addr) & ~msk), addr); -} + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); -/* PHY bit read */ -static int bb_read(void *addr, u32 msk) -{ - return (ioread32(addr) & msk) != 0; + pir = ioread32(bitbang->addr); + if (set) + pir |= mask; + else + pir &= ~mask; + iowrite32(pir, bitbang->addr); } /* Data I/O pin control */ static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mmd_msk); - else - bb_clr(bitbang->addr, bitbang->mmd_msk); + sh_mdio_ctrl(ctrl, PIR_MMD, bit); } /* Set bit data*/ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mdo_msk); - else - bb_clr(bitbang->addr, bitbang->mdo_msk); + sh_mdio_ctrl(ctrl, PIR_MDO, bit); } /* Get bit data*/ @@ -1092,21 +1051,13 @@ static int sh_get_mdio(struct mdiobb_ctrl *ctrl) if (bitbang->set_gate) bitbang->set_gate(bitbang->addr); - return bb_read(bitbang->addr, bitbang->mdi_msk); + return (ioread32(bitbang->addr) & PIR_MDI) != 0; } /* MDC pin control */ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) { - struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); - - if (bitbang->set_gate) - bitbang->set_gate(bitbang->addr); - - if (bit) - bb_set(bitbang->addr, bitbang->mdc_msk); - else - bb_clr(bitbang->addr, bitbang->mdc_msk); + sh_mdio_ctrl(ctrl, PIR_MDC, bit); } /* mdio bus control struct */ @@ -1189,7 +1140,7 @@ static void sh_eth_ring_format(struct net_device *ndev) rxdesc = &mdp->rx_ring[i]; /* The size of the buffer is a multiple of 32 bytes. 
*/ buf_len = ALIGN(mdp->rx_buf_sz, 32); - rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); + rxdesc->len = cpu_to_le32(buf_len << 16); dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) { @@ -1197,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev) break; } mdp->rx_skbuff[i] = skb; - rxdesc->addr = cpu_to_edmac(mdp, dma_addr); - rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); + rxdesc->addr = cpu_to_le32(dma_addr); + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); /* Rx descriptor address set */ if (i == 0) { @@ -1212,7 +1163,7 @@ static void sh_eth_ring_format(struct net_device *ndev) mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); /* Mark the last entry as wrapping the ring. */ - rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE); + rxdesc->status |= cpu_to_le32(RD_RDLE); memset(mdp->tx_ring, 0, tx_ringsize); @@ -1220,8 +1171,8 @@ static void sh_eth_ring_format(struct net_device *ndev) for (i = 0; i < mdp->num_tx_ring; i++) { mdp->tx_skbuff[i] = NULL; txdesc = &mdp->tx_ring[i]; - txdesc->status = cpu_to_edmac(mdp, TD_TFP); - txdesc->len = cpu_to_edmac(mdp, 0); + txdesc->status = cpu_to_le32(TD_TFP); + txdesc->len = cpu_to_le32(0); if (i == 0) { /* Tx descriptor address set */ sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); @@ -1231,7 +1182,7 @@ static void sh_eth_ring_format(struct net_device *ndev) } } - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TDLE); } /* Get skb and descriptor buffer */ @@ -1289,7 +1240,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) { int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); - u32 val; /* Soft Reset */ ret = sh_eth_reset(ndev); @@ -1342,10 +1292,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) } /* PAUSE Prohibition */ - val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | - ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - - sh_eth_write(ndev, val, ECMR); + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); @@ -1387,7 +1335,7 @@ static void sh_eth_dev_exit(struct net_device *ndev) * packet boundary if it's currently running */ for (i = 0; i < mdp->num_tx_ring; i++) - mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); /* Disable TX FIFO egress to MAC */ sh_eth_rcv_snd_disable(ndev); @@ -1419,29 +1367,28 @@ static int sh_eth_txfree(struct net_device *ndev) for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { entry = mdp->dirty_tx % mdp->num_tx_ring; txdesc = &mdp->tx_ring[entry]; - if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) + if (txdesc->status & cpu_to_le32(TD_TACT)) break; /* TACT bit must be checked before all the following reads */ dma_rmb(); netif_info(mdp, tx_done, ndev, "tx entry %d status 0x%08x\n", - entry, edmac_to_cpu(mdp, txdesc->status)); + entry, le32_to_cpu(txdesc->status)); /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { - dma_unmap_single(&ndev->dev, - edmac_to_cpu(mdp, txdesc->addr), - edmac_to_cpu(mdp, txdesc->len) >> 16, + dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), + le32_to_cpu(txdesc->len) >> 16, DMA_TO_DEVICE); dev_kfree_skb_irq(mdp->tx_skbuff[entry]); mdp->tx_skbuff[entry] = NULL; free_num++; } - txdesc->status = cpu_to_edmac(mdp, TD_TFP); + txdesc->status = cpu_to_le32(TD_TFP); if (entry >= mdp->num_tx_ring - 1) - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TDLE); ndev->stats.tx_packets++; - ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16; + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; } return free_num; } @@ -1465,11 +1412,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) boguscnt = min(boguscnt, *quota); limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; - while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { /* RACT bit must be checked before all the following reads */ dma_rmb(); - desc_status = edmac_to_cpu(mdp, rxdesc->status); - pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL; + desc_status = le32_to_cpu(rxdesc->status); + pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; if (--boguscnt < 0) break; @@ -1507,7 +1454,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (desc_status & RD_RFS10) ndev->stats.rx_over_errors++; } else if (skb) { - dma_addr = edmac_to_cpu(mdp, rxdesc->addr); + dma_addr = le32_to_cpu(rxdesc->addr); if (!mdp->cd->hw_swap) sh_eth_soft_swap( phys_to_virt(ALIGN(dma_addr, 4)), @@ -1536,7 +1483,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) rxdesc = &mdp->rx_ring[entry]; /* The size of the buffer is 32 byte boundary. */ buf_len = ALIGN(mdp->rx_buf_sz, 32); - rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); + rxdesc->len = cpu_to_le32(buf_len << 16); if (mdp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(ndev, skbuff_size); @@ -1552,15 +1499,14 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = skb; skb_checksum_none_assert(skb); - rxdesc->addr = cpu_to_edmac(mdp, dma_addr); + rxdesc->addr = cpu_to_le32(dma_addr); } dma_wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= - cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE); + cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE); else - rxdesc->status |= - cpu_to_edmac(mdp, RD_RACT | RD_RFP); + rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); } /* Restart Rx engine if stopped. */ @@ -1877,8 +1823,7 @@ static int sh_eth_phy_init(struct net_device *ndev) return PTR_ERR(phydev); } - netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", - phydev->addr, phydev->irq, phydev->drv->name); + phy_attached_info(phydev); mdp->phydev = phydev; @@ -2360,8 +2305,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) /* Free all the skbuffs in the Rx queue. 
*/ for (i = 0; i < mdp->num_rx_ring; i++) { rxdesc = &mdp->rx_ring[i]; - rxdesc->status = cpu_to_edmac(mdp, 0); - rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); + rxdesc->status = cpu_to_le32(0); + rxdesc->addr = cpu_to_le32(0xBADF00D0); dev_kfree_skb(mdp->rx_skbuff[i]); mdp->rx_skbuff[i] = NULL; } @@ -2409,14 +2354,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) kfree_skb(skb); return NETDEV_TX_OK; } - txdesc->addr = cpu_to_edmac(mdp, dma_addr); - txdesc->len = cpu_to_edmac(mdp, skb->len << 16); + txdesc->addr = cpu_to_le32(dma_addr); + txdesc->len = cpu_to_le32(skb->len << 16); dma_wmb(); /* TACT bit must be set after all the above writes */ if (entry >= mdp->num_tx_ring - 1) - txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); + txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); else - txdesc->status |= cpu_to_edmac(mdp, TD_TACT); + txdesc->status |= cpu_to_le32(TD_TACT); mdp->cur_tx++; @@ -2911,7 +2856,7 @@ static int sh_mdio_release(struct sh_eth_private *mdp) static int sh_mdio_init(struct sh_eth_private *mdp, struct sh_eth_plat_data *pd) { - int ret, i; + int ret; struct bb_info *bitbang; struct platform_device *pdev = mdp->pdev; struct device *dev = &mdp->pdev->dev; @@ -2924,10 +2869,6 @@ static int sh_mdio_init(struct sh_eth_private *mdp, /* bitbang init */ bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; bitbang->set_gate = pd->set_mdio_gate; - bitbang->mdi_msk = PIR_MDI; - bitbang->mdo_msk = PIR_MDO; - bitbang->mmd_msk = PIR_MMD; - bitbang->mdc_msk = PIR_MDC; bitbang->ctrl.ops = &bb_ops; /* MII controller setting */ @@ -2941,20 +2882,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); - /* PHY IRQ */ - mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), - GFP_KERNEL); - if (!mdp->mii_bus->irq) { - ret = -ENOMEM; - goto out_free_bus; - } - /* register MDIO bus */ if (dev->of_node) { ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - mdp->mii_bus->irq[i] = PHY_POLL; if (pd->phy_irq > 0) mdp->mii_bus->irq[pd->phy] = pd->phy_irq; @@ -3126,8 +3057,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* get PHY ID */ mdp->phy_id = pd->phy; mdp->phy_interface = pd->phy_interface; - /* EDMAC endian */ - mdp->edmac_endian = pd->edmac_endian; mdp->no_ether_link = pd->no_ether_link; mdp->ether_link_active_low = pd->ether_link_active_low; @@ -3307,13 +3236,6 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, - { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data }, - { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, - { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, - { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, - { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, { } }; MODULE_DEVICE_TABLE(platform, sh_eth_id_table); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 72fcfc924589..8fa4ef3a7fdd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -513,7 +513,6 @@ struct sh_eth_private { u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ u32 cur_tx, dirty_tx; u32 rx_buf_sz; /* Based on MTU+slack. 
*/ - int edmac_endian; struct napi_struct napi; bool irq_enabled; /* MII transceiver section. */ diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index e9f2349e98bc..a4ab71d43e4e 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4998,7 +4998,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; dev->switchdev_ops = &rocker_port_switchdev_ops; - netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, NAPI_POLL_WEIGHT); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, NAPI_POLL_WEIGHT); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c index 43ccb4a6de15..467ff7033606 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -180,7 +180,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - struct phy_device *phy = mdio_bus->phy_map[phy_addr]; + struct phy_device *phy = mdiobus_get_phy(mdio_bus, phy_addr); if (phy) { char irq_num[4]; @@ -216,7 +216,7 @@ int sxgbe_mdio_register(struct net_device *ndev) } netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", phy->phy_id, phy_addr, irq_str, - dev_name(&phy->dev), act ? " active" : ""); + phydev_name(phy), act ? " active" : ""); phy_found = true; } } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e6a084a6be12..98d33d462c6c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -181,13 +181,6 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); if (!(nic_data->datapath_caps & - (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { - netif_err(efx, drv, efx->net_dev, - "current firmware does not support TSO\n"); - return -ENODEV; - } - - if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { netif_err(efx, probe, efx->net_dev, "current firmware does not support an RX prefix\n"); @@ -493,10 +486,17 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); for (i = 0; i < n; i++) { - rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) { + /* Don't display the MC error if we didn't have space + * for a VF. 
+ */ + if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) + efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, + 0, outbuf, outlen, rc); break; + } if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { rc = -EIO; break; @@ -1797,6 +1797,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); tx_queue->write_count = 1; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { + tx_queue->tso_version = 1; + } + wmb(); efx_ef10_push_tx_desc(tx_queue, txd); @@ -2375,8 +2381,19 @@ static int efx_ef10_ev_init(struct efx_channel *channel) 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { netif_info(efx, drv, efx->net_dev, "other functions on NIC have been reset\n"); - /* MC's boot count has incremented */ - ++nic_data->warm_boot_count; + + /* With MCFW v4.6.x and earlier, the + * boot count will have incremented, + * so re-read the warm_boot_count + * value now to ensure this function + * doesn't think it has changed next + * time it checks. + */ + rc = efx_ef10_get_warm_boot_count(efx); + if (rc >= 0) { + nic_data->warm_boot_count = rc; + rc = 0; + } } nic_data->workaround_26807 = true; } else if (rc == -EPERM) { @@ -3823,13 +3840,12 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, table->entry[filter_idx].handle); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, + sizeof(inbuf), NULL, 0, NULL); if (rc) - netdev_WARN(efx->net_dev, - "filter_idx=%#x handle=%#llx\n", - filter_idx, - table->entry[filter_idx].handle); + netif_info(efx, drv, efx->net_dev, + "%s: filter %04x remove failed\n", + __func__, filter_idx); kfree(spec); } @@ -3838,11 +3854,14 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) } #define EFX_EF10_FILTER_DO_MARK_OLD(id) \ - if (id != EFX_EF10_FILTER_ID_INVALID) { \ - filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ - WARN_ON(!table->entry[filter_idx].spec); \ - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ - } + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + if (!table->entry[filter_idx].spec) \ + netif_dbg(efx, drv, efx->net_dev, \ + "%s: marked null spec old %04x:%04x\n", \ + __func__, id, filter_idx); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\ + } static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -4016,9 +4035,10 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_warn(efx, drv, efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, + efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
"Multi" : "Uni", rc); } else if (multicast) { table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); if (!nic_data->workaround_26807) { @@ -4060,19 +4080,31 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, static void efx_ef10_filter_remove_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - bool remove_failed = false; + int remove_failed = 0; + int remove_noent = 0; + int rc; int i; for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { - if (efx_ef10_filter_remove_internal( - efx, 1U << EFX_FILTER_PRI_AUTO, - i, true) < 0) - remove_failed = true; + rc = efx_ef10_filter_remove_internal(efx, + 1U << EFX_FILTER_PRI_AUTO, i, true); + if (rc == -ENOENT) + remove_noent++; + else if (rc) + remove_failed++; } } - WARN_ON(remove_failed); + + if (remove_failed) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d filters\n", + __func__, remove_failed); + if (remove_noent) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d non-existent filters\n", + __func__, remove_noent); } static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a3c42a376741..0705ec869487 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2059,7 +2059,6 @@ static void efx_init_napi_channel(struct efx_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); - napi_hash_add(&channel->napi_str); efx_channel_busy_poll_init(channel); } @@ -2785,6 +2784,12 @@ static const struct pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {0} /* end of list */ }; @@ -3123,10 +3128,10 @@ static int efx_pci_probe(struct pci_dev *pci_dev, net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); - if (efx->type->offload_features & NETIF_F_V6_CSUM) + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ - net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); /* All offloads can be toggled */ @@ -3169,14 +3174,15 @@ static int efx_pci_probe(struct pci_dev *pci_dev, rtnl_lock(); rc = efx_mtd_probe(efx); rtnl_unlock(); - if (rc) + if (rc && rc != -EPERM) netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); rc = pci_enable_pcie_error_reporting(pci_dev); if (rc && rc != -EINVAL) - netif_warn(efx, probe, efx->net_dev, - "pci_enable_pcie_error_reporting failed (%d)\n", rc); + netif_notice(efx, probe, efx->net_dev, + "PCIE error reporting unavailable (%d).\n", + rc); return 0; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 41fb6b60a3f0..d28e7dd8fa3c 
100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -82,6 +82,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->logging_enabled = mcdi_logging_default; #endif init_waitqueue_head(&mcdi->wq); + init_waitqueue_head(&mcdi->proxy_rx_wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; mcdi->mode = MCDI_MODE_POLL; @@ -315,6 +316,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } #endif + mcdi->resprc_raw = 0; if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; @@ -325,8 +327,8 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) mcdi->resprc = -EIO; } else if (error) { efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); - mcdi->resprc = - efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); + mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0); + mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw); } else { mcdi->resprc = 0; } @@ -621,9 +623,30 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) return 0; } -static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, +static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx, + size_t hdr_len, size_t data_len, + u32 *proxy_handle) +{ + MCDI_DECLARE_BUF_ERR(testbuf); + const size_t buflen = sizeof(testbuf); + + if (!proxy_handle || data_len < buflen) + return false; + + efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen); + if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) { + *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE); + return true; + } + + return false; +} + +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual, bool quiet) + size_t *outlen_actual, bool quiet, + u32 *proxy_handle, int *raw_rc) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); MCDI_DECLARE_BUF_ERR(errbuf); @@ -657,6 +680,9 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, spin_unlock_bh(&mcdi->iface_lock); } + if (proxy_handle) + *proxy_handle = 0; + if (rc != 0) { if (outlen_actual) *outlen_actual = 0; @@ -669,6 +695,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, * acquiring the iface_lock. */ spin_lock_bh(&mcdi->iface_lock); rc = mcdi->resprc; + if (raw_rc) + *raw_rc = mcdi->resprc_raw; hdr_len = mcdi->resp_hdr_len; data_len = mcdi->resp_data_len; err_len = min(sizeof(errbuf), data_len); @@ -689,6 +717,12 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", -rc); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (proxy_handle && (rc == -EPROTO) && + efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, + proxy_handle)) { + mcdi->proxy_rx_status = 0; + mcdi->proxy_rx_handle = 0; + mcdi->state = MCDI_STATE_PROXY_WAIT; } else if (rc && !quiet) { efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, rc); @@ -701,34 +735,195 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, } } - efx_mcdi_release(mcdi); + if (!proxy_handle || !*proxy_handle) + efx_mcdi_release(mcdi); return rc; } -static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, +static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->state == MCDI_STATE_PROXY_WAIT) { + /* Interrupt the proxy wait. 
*/ + mcdi->proxy_rx_status = -EINTR; + wake_up(&mcdi->proxy_rx_wq); + } +} + +static void efx_mcdi_ev_proxy_response(struct efx_nic *efx, + u32 handle, int status) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT); + + mcdi->proxy_rx_status = efx_mcdi_errno(status); + /* Ensure the status is written before we update the handle, since the + * latter is used to check if we've finished. + */ + wmb(); + mcdi->proxy_rx_handle = handle; + wake_up(&mcdi->proxy_rx_wq); +} + +static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + /* Wait for a proxy event, or timeout. */ + rc = wait_event_timeout(mcdi->proxy_rx_wq, + mcdi->proxy_rx_handle != 0 || + mcdi->proxy_rx_status == -EINTR, + MCDI_RPC_TIMEOUT); + + if (rc <= 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy timeout %d\n", handle); + return -ETIMEDOUT; + } else if (mcdi->proxy_rx_handle != handle) { + netif_warn(efx, hw, efx->net_dev, + "MCDI proxy unexpected handle %d (expected %d)\n", + mcdi->proxy_rx_handle, handle); + return -EINVAL; + } + + return mcdi->proxy_rx_status; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual, bool quiet) + size_t *outlen_actual, bool quiet, int *raw_rc) { + u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */ int rc; + if (inbuf && inlen && (inbuf == outbuf)) { + /* The input buffer can't be aliased with the output. */ + WARN_ON(1); + return -EINVAL; + } + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); - if (rc) { - if (outlen_actual) - *outlen_actual = 0; + if (rc) return rc; + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet, &proxy_handle, raw_rc); + + if (proxy_handle) { + /* Handle proxy authorisation. This allows approval of MCDI + * operations to be delegated to the admin function, allowing + * fine control over (eg) multicast subscriptions. + */ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + netif_dbg(efx, hw, efx->net_dev, + "MCDI waiting for proxy auth %d\n", + proxy_handle); + rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet); + + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy retry %d\n", proxy_handle); + + /* We now retry the original request. */ + mcdi->state = MCDI_STATE_RUNNING_SYNC; + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, + outbuf, outlen, outlen_actual, + quiet, NULL, raw_rc); + } else { + netif_printk(efx, hw, + rc == -EPERM ? KERN_DEBUG : KERN_ERR, + efx->net_dev, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); + + if (rc == -EINTR || rc == -EIO) + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_mcdi_release(mcdi); + } } - return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, quiet); + + return rc; } +static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int raw_rc = 0; + int rc; + + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, true, &raw_rc); + + if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + efx->type->is_vf) { + /* If the EVB port isn't available within a VF this may + * mean the PF is still bringing the switch up. We should + * retry our request shortly. 
+ */ + unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT; + unsigned int delay_us = 10000; + + netif_dbg(efx, hw, efx->net_dev, + "%s: NO_EVB_PORT; will retry request\n", + __func__); + + do { + usleep_range(delay_us, delay_us + 10000); + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, + true, &raw_rc); + if (delay_us < 100000) + delay_us <<= 1; + } while ((rc == -EPROTO) && + (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + time_before(jiffies, abort_time)); + } + + if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO)) + efx_mcdi_display_error(efx, cmd, inlen, + outbuf, outlen, rc); + + return rc; +} + +/** + * efx_mcdi_rpc - Issue an MCDI command and wait for completion + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes. Must be a multiple + * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1. + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * @outlen: Length of response buffer, in bytes. If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This function may sleep and therefore must be called in an appropriate + * context. + * + * Return: A negative error code, or zero if successful. The error + * code may come from the MCDI response or may indicate a failure + * to communicate with the MC. In the former case, the response + * will still be copied to @outbuf and *@outlen_actual will be + * set accordingly. In the latter case, *@outlen_actual will be + * set to zero. + */ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, - outlen_actual, false); + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); } /* Normally, on receiving an error code in the MCDI response, @@ -744,8 +939,8 @@ int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, - outlen_actual, true); + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); } int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, @@ -866,7 +1061,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, false); + outlen_actual, false, NULL, NULL); } int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, @@ -874,7 +1069,7 @@ int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, - outlen_actual, true); + outlen_actual, true, NULL, NULL); } void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, @@ -887,9 +1082,10 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", - cmd, (int)inlen, rc, code, err_arg); + netif_printk(efx, hw, rc == -EPERM ? 
KERN_DEBUG : KERN_ERR, + efx->net_dev, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -1014,8 +1210,13 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) * receiving a REBOOT event after posting the MCDI * request. Did the mc reboot before or after the copyout? The * best we can do always is just return failure. + * + * If there is an outstanding proxy response expected it is not going + * to arrive. We should thus abort it. */ spin_lock(&mcdi->iface_lock); + efx_mcdi_proxy_abort(mcdi); + if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = rc; @@ -1063,6 +1264,8 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx) spin_lock(&mcdi->iface_lock); efx->mc_bist_for_other_fn = true; + efx_mcdi_proxy_abort(mcdi); + if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = -EIO; @@ -1171,6 +1374,11 @@ void efx_mcdi_process_event(struct efx_channel *channel, EFX_QWORD_VAL(*event)); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); break; + case MCDI_EVENT_CODE_PROXY_RESPONSE: + efx_mcdi_ev_proxy_response(efx, + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); + break; default: netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", code); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 025d504c472b..c9aeb0701c9a 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -17,6 +17,8 @@ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending. * Only the thread that moved into this state is allowed to move out of it. * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending. + * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that + * indicates we must wait for a proxy try again message. * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread * has not yet consumed the result. For all other threads, equivalent to * %MCDI_STATE_RUNNING. @@ -25,6 +27,7 @@ enum efx_mcdi_state { MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC, MCDI_STATE_RUNNING_ASYNC, + MCDI_STATE_PROXY_WAIT, MCDI_STATE_COMPLETED, }; @@ -60,6 +63,9 @@ enum efx_mcdi_mode { * @async_timer: Timer for asynchronous request timeout * @logging_buffer: buffer that may be used to build MCDI tracing messages * @logging_enabled: whether to trace MCDI + * @proxy_rx_handle: Most recently received proxy authorisation handle + * @proxy_rx_status: Status of most recent proxy authorisation + * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -71,6 +77,7 @@ struct efx_mcdi_iface { unsigned int credits; unsigned int seqno; int resprc; + int resprc_raw; size_t resp_hdr_len; size_t resp_data_len; spinlock_t async_lock; @@ -80,6 +87,9 @@ struct efx_mcdi_iface { char *logging_buffer; bool logging_enabled; #endif + unsigned int proxy_rx_handle; + int proxy_rx_status; + wait_queue_head_t proxy_rx_wq; }; struct efx_mcdi_mon { diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a8ddd122f685..38c422321cda 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -182,6 +182,7 @@ struct efx_tx_buffer { * * @efx: The associated Efx NIC * @queue: DMA queue number + * @tso_version: Version of TSO in use for this queue. 
* @channel: The associated channel * @core_txq: The networking core TX queue structure * @buffer: The software buffer ring @@ -228,6 +229,7 @@ struct efx_tx_queue { /* Members which don't change on the fast path */ struct efx_nic *efx ____cacheline_aligned_in_smp; unsigned queue; + unsigned int tso_version; struct efx_channel *channel; struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; @@ -1502,8 +1504,9 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, * same cycle, the XMAC can miss the IPG altogether. We work around * this by adding a further 16 bytes. */ +#define EFX_FRAME_PAD 16 #define EFX_MAX_FRAME_LEN(mtu) \ - ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) + (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8)) static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 809ea4610a77..8956995b2fe7 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -463,7 +463,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, skb_record_rx_queue(skb, channel->rx_queue.core_index); - skb_mark_napi_id(skb, &channel->napi_str); gro_result = napi_gro_frags(napi); if (gro_result != GRO_DROP) channel->irq_mod_score += 2; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 67f6afaa022f..f7a0ec1bca97 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -1010,13 +1010,17 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, /* Parse the SKB header and initialise state. */ static int tso_start(struct tso_state *st, struct efx_nic *efx, + struct efx_tx_queue *tx_queue, const struct sk_buff *skb) { - bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; struct device *dma_dev = &efx->pci_dev->dev; unsigned int header_len, in_len; + bool use_opt_desc = false; dma_addr_t dma_addr; + if (tx_queue->tso_version == 1) + use_opt_desc = true; + st->ip_off = skb_network_header(skb) - skb->data; st->tcp_off = skb_transport_header(skb) - skb->data; header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); @@ -1271,7 +1275,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); - rc = tso_start(&state, efx, skb); + rc = tso_start(&state, efx, tx_queue, skb); if (rc) goto mem_err; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 219a99b7a631..8af25563f627 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -864,8 +864,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) for (i = 0; i < 10; i++) { /* Set PHY to 10/FD, no ANEG, and loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, - BMCR_LOOPBACK | BMCR_FULLDPLX); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, + MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX); /* Enable MAC tx/rx, FD */ spin_lock_irqsave(&pdata->mac_lock, flags); @@ -893,7 +893,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) spin_unlock_irqrestore(&pdata->mac_lock, flags); /* Cancel PHY loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + smsc911x_mii_write(phy_dev->mdio.bus, phy_dev->mdio.addr, MII_BMCR, 0); smsc911x_reg_write(pdata, TX_CFG, 0); smsc911x_reg_write(pdata, RX_CFG, 0); @@ -1021,7 +1021,7 @@ static int 
smsc911x_mii_probe(struct net_device *dev) } SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", - phydev->addr, phydev->phy_id); + phydev->mdio.addr, phydev->phy_id); ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, pdata->config.phy_interface); @@ -1031,9 +1031,7 @@ static int smsc911x_mii_probe(struct net_device *dev) return ret; } - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + phy_attached_info(phydev); /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | @@ -1061,7 +1059,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pdata->mii_bus = mdiobus_alloc(); if (!pdata->mii_bus) { @@ -1075,9 +1073,7 @@ static int smsc911x_mii_init(struct platform_device *pdev, pdata->mii_bus->priv = pdata; pdata->mii_bus->read = smsc911x_mii_read; pdata->mii_bus->write = smsc911x_mii_write; - pdata->mii_bus->irq = pdata->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pdata->mii_bus->irq[i] = PHY_POLL; + memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus)); pdata->mii_bus->parent = &pdev->dev; @@ -1992,7 +1988,8 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, } for (i = 0; i <= 31; i++) - data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc911x_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 4a90cdae5444..8594b9e8b28b 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -78,7 +78,6 @@ struct smsc9420_pdata { struct phy_device *phy_dev; struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; int last_duplex; int last_carrier; }; @@ -316,7 +315,8 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, return; for (i = 0; i <= 31; i++) - data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); + data[j++] = smsc9420_mii_read(phy_dev->mdio.bus, + phy_dev->mdio.addr, i); } static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) @@ -1158,16 +1158,13 @@ static int smsc9420_mii_probe(struct net_device *dev) BUG_ON(pd->phy_dev); /* Device only supports internal PHY at address 1 */ - if (!pd->mii_bus->phy_map[1]) { + phydev = mdiobus_get_phy(pd->mii_bus, 1); + if (!phydev) { netdev_err(dev, "no PHY found at address 1\n"); return -ENODEV; } - phydev = pd->mii_bus->phy_map[1]; - netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", - phydev->addr, phydev->phy_id); - - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1175,14 +1172,13 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause); phydev->advertising = phydev->supported; + phy_attached_info(phydev); + pd->phy_dev = phydev; pd->last_duplex = -1; pd->last_carrier = -1; @@ -1193,7 +1189,7 @@ static int smsc9420_mii_probe(struct 
net_device *dev) static int smsc9420_mii_init(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); - int err = -ENXIO, i; + int err = -ENXIO; pd->mii_bus = mdiobus_alloc(); if (!pd->mii_bus) { @@ -1206,9 +1202,6 @@ static int smsc9420_mii_init(struct net_device *dev) pd->mii_bus->priv = pd; pd->mii_bus->read = smsc9420_mii_read; pd->mii_bus->write = smsc9420_mii_write; - pd->mii_bus->irq = pd->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pd->mii_bus->irq[i] = PHY_POLL; /* Mask all PHYs except ID 1 (internal) */ pd->mii_bus->phy_mask = ~(1 << 1); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 623c6ed8764a..1e19c8fd8b82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -137,6 +137,31 @@ struct stmmac_extra_stats { unsigned long pcs_link; unsigned long pcs_duplex; unsigned long pcs_speed; + /* debug register */ + unsigned long mtl_tx_status_fifo_full; + unsigned long mtl_tx_fifo_not_empty; + unsigned long mmtl_fifo_ctrl; + unsigned long mtl_tx_fifo_read_ctrl_write; + unsigned long mtl_tx_fifo_read_ctrl_wait; + unsigned long mtl_tx_fifo_read_ctrl_read; + unsigned long mtl_tx_fifo_read_ctrl_idle; + unsigned long mac_tx_in_pause; + unsigned long mac_tx_frame_ctrl_xfer; + unsigned long mac_tx_frame_ctrl_idle; + unsigned long mac_tx_frame_ctrl_wait; + unsigned long mac_tx_frame_ctrl_pause; + unsigned long mac_gmii_tx_proto_engine; + unsigned long mtl_rx_fifo_fill_level_full; + unsigned long mtl_rx_fifo_fill_above_thresh; + unsigned long mtl_rx_fifo_fill_below_thresh; + unsigned long mtl_rx_fifo_fill_level_empty; + unsigned long mtl_rx_fifo_read_ctrl_flush; + unsigned long mtl_rx_fifo_read_ctrl_read_data; + unsigned long mtl_rx_fifo_read_ctrl_status; + unsigned long mtl_rx_fifo_read_ctrl_idle; + unsigned long mtl_rx_fifo_ctrl_active; + unsigned long mac_rx_frame_ctrl_fifo; + unsigned long mac_gmii_rx_proto_engine; }; /* CSR Frequency Access Defines*/ @@ -408,12 +433,13 @@ struct stmmac_ops { void (*set_eee_pls)(struct mac_device_info *hw, int link); void (*ctrl_ane)(struct mac_device_info *hw, bool restart); void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); }; /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); - void (*config_sub_second_increment) (void __iomem *ioaddr); + u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); int (*config_addend) (void __iomem *ioaddr, u32 addend); int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 82de68b1a452..36d3355f2fb0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -198,19 +198,19 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) return 0; } -static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) +static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) { struct device *dev = &gmac->pdev->dev; gmac->phy_mode = of_get_phy_mode(dev->of_node); if (gmac->phy_mode < 0) { dev_err(dev, "missing phy mode property\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) { 
dev_err(dev, "missing qcom id property\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* The GMACs are called 1 to 4 in the documentation, but to simplify the @@ -219,13 +219,13 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) */ if (gmac->id < 0 || gmac->id > 3) { dev_err(dev, "invalid gmac id\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } gmac->core_clk = devm_clk_get(dev, "stmmaceth"); if (IS_ERR(gmac->core_clk)) { dev_err(dev, "missing stmmaceth clk property\n"); - return gmac->core_clk; + return PTR_ERR(gmac->core_clk); } clk_set_rate(gmac->core_clk, 266000000); @@ -234,18 +234,16 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) "qcom,nss-common"); if (IS_ERR(gmac->nss_common)) { dev_err(dev, "missing nss-common node\n"); - return gmac->nss_common; + return PTR_ERR(gmac->nss_common); } /* Setup the register map for the qsgmii csr registers */ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node, "qcom,qsgmii-csr"); - if (IS_ERR(gmac->qsgmii_csr)) { + if (IS_ERR(gmac->qsgmii_csr)) dev_err(dev, "missing qsgmii-csr node\n"); - return gmac->qsgmii_csr; - } - return NULL; + return PTR_ERR_OR_ZERO(gmac->qsgmii_csr); } static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) @@ -262,7 +260,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ipq806x_gmac *gmac; int val; - void *err; + int err; val = stmmac_get_platform_resources(pdev, &stmmac_res); if (val) @@ -279,9 +277,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); - if (IS_ERR(err)) { + if (err) { dev_err(dev, "device tree parsing error\n"); - return PTR_ERR(err); + return err; } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 401383b252a8..f0d797ab74d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -32,6 +32,7 @@ #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 +#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 #define EMAC_SPLITTER_CTRL_REG 0x0 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 @@ -47,6 +48,7 @@ struct socfpga_dwmac { struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; void __iomem *splitter_base; + bool f2h_ptp_ref_clk; }; static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) @@ -116,6 +118,8 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return -EINVAL; } + dwmac->f2h_ptp_ref_clk = of_property_read_bool(np, "altr,f2h_ptp_ref_clk"); + np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); if (np_splitter) { if (of_address_to_resource(np_splitter, 0, &res_splitter)) { @@ -171,6 +175,11 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; + if (dwmac->f2h_ptp_ref_clk) + ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); + else + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index b3fe0575ff6b..8831a053ac13 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -34,6 +34,7 @@ #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC_DEBUG 0x00000024 /* GMAC debug register */ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ @@ -177,6 +178,47 @@ enum inter_frame_gap { #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ +/* DEBUG Register defines */ +/* MTL TxStatus FIFO */ +#define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */ +#define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */ +#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */ +/* MTL Tx FIFO Read Controller Status */ +#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20) +#define GMAC_DEBUG_TRCSTS_SHIFT 20 +#define GMAC_DEBUG_TRCSTS_IDLE 0 +#define GMAC_DEBUG_TRCSTS_READ 1 +#define GMAC_DEBUG_TRCSTS_TXW 2 +#define GMAC_DEBUG_TRCSTS_WRITE 3 +#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */ +/* MAC Transmit Frame Controller Status */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +/* MAC GMII or MII Transmit Protocol Engine Status */ +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */ +#define GMAC_DEBUG_RXFSTS_SHIFT 8 +#define GMAC_DEBUG_RXFSTS_EMPTY 0 +#define GMAC_DEBUG_RXFSTS_BT 1 +#define GMAC_DEBUG_RXFSTS_AT 2 +#define GMAC_DEBUG_RXFSTS_FULL 3 +#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */ +#define GMAC_DEBUG_RRCSTS_SHIFT 5 +#define GMAC_DEBUG_RRCSTS_IDLE 0 +#define GMAC_DEBUG_RRCSTS_RDATA 1 +#define GMAC_DEBUG_RRCSTS_RSTAT 2 +#define GMAC_DEBUG_RRCSTS_FLUSH 3 +#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */ +/* MAC Receive Frame Controller FIFO Status */ +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +/* MAC GMII or MII Receive Protocol Engine Status */ +#define GMAC_DEBUG_RPESTS BIT(0) + /*--- DMA BLOCK defines ---*/ /* DMA Bus Mode register defines */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 371a669d69fd..c2941172f6d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -397,6 +397,80 @@ static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; } +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & GMAC_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & GMAC_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & GMAC_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK) + >> GMAC_DEBUG_TRCSTS_SHIFT; + if (trcsts == GMAC_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == GMAC_DEBUG_TRCSTS_TXW) + 
x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == GMAC_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK) + >> GMAC_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == GMAC_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & GMAC_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >> + GMAC_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .rx_ipc = dwmac1000_rx_ipc_enable, @@ -413,6 +487,7 @@ static const struct stmmac_ops dwmac1000_ops = { .set_eee_pls = dwmac1000_set_eee_pls, .ctrl_ane = dwmac1000_ctrl_ane, .get_adv = dwmac1000_get_adv, + .debug = dwmac1000_debug, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2e51b816a7e8..4c6486cc80fb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -136,6 +136,31 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(irq_pcs_ane_n), STMMAC_STAT(irq_pcs_link_n), STMMAC_STAT(irq_rgmii_n), + /* DEBUG */ + STMMAC_STAT(mtl_tx_status_fifo_full), + STMMAC_STAT(mtl_tx_fifo_not_empty), + STMMAC_STAT(mmtl_fifo_ctrl), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_write), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_read), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle), + STMMAC_STAT(mac_tx_in_pause), + STMMAC_STAT(mac_tx_frame_ctrl_xfer), + STMMAC_STAT(mac_tx_frame_ctrl_idle), + STMMAC_STAT(mac_tx_frame_ctrl_wait), + STMMAC_STAT(mac_tx_frame_ctrl_pause), + STMMAC_STAT(mac_gmii_tx_proto_engine), + STMMAC_STAT(mtl_rx_fifo_fill_level_full), + STMMAC_STAT(mtl_rx_fifo_fill_above_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_below_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_level_empty), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_status), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle), + STMMAC_STAT(mtl_rx_fifo_ctrl_active), + 
STMMAC_STAT(mac_rx_frame_ctrl_fifo), + STMMAC_STAT(mac_gmii_rx_proto_engine), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -497,6 +522,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, if (val) priv->xstats.phy_eee_wakeup_error_n = val; } + + if ((priv->hw->mac->debug) && + (priv->synopsys_id >= DWMAC_CORE_3_50)) + priv->hw->mac->debug(priv->ioaddr, + (void *)&priv->xstats); } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 76ad214b4036..a77f68918010 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -33,22 +33,25 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) writel(data, ioaddr + PTP_TCR); } -static void stmmac_config_sub_second_increment(void __iomem *ioaddr) +static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock) { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; /* Convert the ptp_clock to nano second - * formula = (1/ptp_clock) * 1000000000 + * formula = (2/ptp_clock) * 1000000000 * where, ptp_clock = 50MHz. */ - data = (1000000000ULL / 50000000); + data = (2000000000ULL / ptp_clock); /* 0.465ns accuracy */ if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; writel(data, ioaddr + PTP_SSIR); + + return data; } static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a5b869eb4678..c21015b68097 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -53,6 +53,7 @@ #include "stmmac.h" #include <linux/reset.h> #include <linux/of_mdio.h> +#include "dwmac1000.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) @@ -185,7 +186,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } } @@ -435,6 +436,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) u32 ts_master_en = 0; u32 ts_event_en = 0; u32 value = 0; + u32 sec_inc; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -598,24 +600,19 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); /* program Sub Second Increment reg */ - priv->hw->ptp->config_sub_second_increment(priv->ioaddr); + sec_inc = priv->hw->ptp->config_sub_second_increment( + priv->ioaddr, priv->clk_ptp_rate); + temp = div_u64(1000000000ULL, sec_inc); /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = clk_ptp_ref_i/50MHz - * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; - * NOTE: clk_ptp_ref_i should be >= 50MHz to - * achieve 20ns accuracy. 
- * - * 2^x * y == (y << x), hence - * 2^32 * 50000000 ==> (50000000 << 32) + * where, freq_div_ratio = 1e9ns/sec_inc */ - temp = (u64) (50000000ULL << 32); + temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ioaddr, priv->default_addend); @@ -2402,7 +2399,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, features &= ~NETIF_F_RXCSUM; if (!priv->plat->tx_coe) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames @@ -2410,7 +2407,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, * the TX csum insertionin the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) - features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_CSUM_MASK; return features; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index bba670c42e37..0faf16336035 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -29,7 +29,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> - +#include <linux/of_mdio.h> #include <asm/io.h> #include "stmmac.h" @@ -196,25 +196,37 @@ int stmmac_mdio_register(struct net_device *ndev) { int err = 0; struct mii_bus *new_bus; - int *irqlist; struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; + struct device_node *mdio_node = NULL; + struct device_node *child_node = NULL; if (!mdio_bus_data) return 0; + if (IS_ENABLED(CONFIG_OF)) { + for_each_child_of_node(priv->device->of_node, child_node) { + if (of_device_is_compatible(child_node, + "snps,dwmac-mdio")) { + mdio_node = child_node; + break; + } + } + + if (mdio_node) { + netdev_dbg(ndev, "FOUND MDIO subnode\n"); + } else { + netdev_warn(ndev, "No MDIO subnode found\n"); + } + } + new_bus = mdiobus_alloc(); if (new_bus == NULL) return -ENOMEM; - if (mdio_bus_data->irqs) { - irqlist = mdio_bus_data->irqs; - } else { - for (addr = 0; addr < PHY_MAX_ADDR; addr++) - priv->mii_irq[addr] = PHY_POLL; - irqlist = priv->mii_irq; - } + if (mdio_bus_data->irqs) + memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); #ifdef CONFIG_OF if (priv->device->of_node) @@ -228,10 +240,13 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - new_bus->irq = irqlist; new_bus->phy_mask = mdio_bus_data->phy_mask; new_bus->parent = priv->device; - err = mdiobus_register(new_bus); + + if (mdio_node) + err = of_mdiobus_register(new_bus, mdio_node); + else + err = mdiobus_register(new_bus); if (err != 0) { pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; @@ -239,7 +254,7 @@ int stmmac_mdio_register(struct net_device *ndev) found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - struct phy_device *phydev = new_bus->phy_map[addr]; + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); if (phydev) { int act = 0; char irq_num[4]; @@ -251,7 +266,8 @@ int stmmac_mdio_register(struct net_device *ndev) */ if ((mdio_bus_data->irqs == NULL) && (mdio_bus_data->probed_phy_irq > 0)) { - irqlist[addr] = mdio_bus_data->probed_phy_irq; + new_bus->irq[addr] = + mdio_bus_data->probed_phy_irq; phydev->irq = 
mdio_bus_data->probed_phy_irq; } @@ -278,13 +294,13 @@ int stmmac_mdio_register(struct net_device *ndev) } pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n", ndev->name, phydev->phy_id, addr, - irq_str, dev_name(&phydev->dev), + irq_str, phydev_name(phydev), act ? " active" : ""); found = 1; } } - if (!found) { + if (!found && !mdio_node) { pr_warn("%s: No PHY found\n", ndev->name); mdiobus_unregister(new_bus); mdiobus_free(new_bus); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index d02691ba3d7f..6a52fa18cbf2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -146,7 +146,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if (plat->phy_node || plat->phy_bus_name) + if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 9066d7a8483c..70814b7386b3 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -972,9 +972,7 @@ static int dwceqos_mii_probe(struct net_device *ndev) } if (netif_msg_probe(lp)) - netdev_dbg(lp->ndev, - "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n", - phydev, phydev->phy_id, phydev->addr); + phy_attached_info(phydev); phydev->supported &= PHY_GBIT_FEATURES; @@ -983,14 +981,6 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->duplex = DUPLEX_UNKNOWN; lp->phy_dev = phydev; - if (netif_msg_probe(lp)) { - netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n", - lp->phy_dev->addr, lp->phy_dev->phy_id); - - netdev_dbg(lp->ndev, "attach [%s] phy driver\n", - lp->phy_dev->drv->name); - } - return 0; } @@ -1230,7 +1220,7 @@ static void dwceqos_enable_mmc_interrupt(struct net_local *lp) static int dwceqos_mii_init(struct net_local *lp) { - int ret = -ENXIO, i; + int ret = -ENXIO; struct resource res; struct device_node *mdionode; @@ -1251,24 +1241,14 @@ static int dwceqos_mii_init(struct net_local *lp) lp->mii_bus->priv = lp; lp->mii_bus->parent = &lp->ndev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - ret = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; of_address_to_resource(lp->pdev->dev.of_node, 0, &res); snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; return 0; -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(lp->mii_bus); err_out: @@ -2107,7 +2087,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, dd = &lp->tx_descs[lp->tx_next]; /* Set DMA Descriptor fields */ - dd->des0 = dma_handle; + dd->des0 = dma_handle + consumed_size; dd->des1 = 0; dd->des2 = dma_size; @@ -2987,7 +2967,6 @@ static int dwceqos_remove(struct platform_device *pdev) if (lp->phy_dev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 
77d26fe286c0..7eef45e6d70a 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -316,8 +316,6 @@ static int cpmac_mdio_reset(struct mii_bus *bus) return 0; } -static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; - static struct mii_bus *cpmac_mii; static void cpmac_set_multicast_list(struct net_device *dev) @@ -1118,7 +1116,7 @@ static int cpmac_probe(struct platform_device *pdev) for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { if (!(pdata->phy_mask & (1 << phy_id))) continue; - if (!cpmac_mii->phy_map[phy_id]) + if (!mdiobus_get_phy(cpmac_mii, phy_id)) continue; strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); break; @@ -1226,7 +1224,6 @@ int cpmac_init(void) cpmac_mii->read = cpmac_mdio_read; cpmac_mii->write = cpmac_mdio_write; cpmac_mii->reset = cpmac_mdio_reset; - cpmac_mii->irq = mii_irqs; cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fc958067d10a..42fdfd4d9d4f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1159,8 +1159,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->data->phy_id, slave->slave_num); slave->phy = NULL; } else { - dev_info(priv->dev, "phy found : id is : 0x%x\n", - slave->phy->phy_id); + phy_attached_info(slave->phy); + phy_start(slave->phy); /* Configure GMII_SEL register */ @@ -2050,7 +2050,8 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, if (!phy_dev) return -ENODEV; snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr); + PHY_ID_FMT, phy_dev->mdio.bus->id, + phy_dev->mdio.addr); } else if (parp) { u32 phyid; struct device_node *mdio_node; @@ -2482,7 +2483,7 @@ static int cpsw_probe(struct platform_device *pdev) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 33bd3b902304..5d9abedd6b75 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1644,10 +1644,7 @@ static int emac_dev_open(struct net_device *ndev) priv->speed = 0; priv->duplex = ~0; - dev_info(emac_dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - priv->phydev->drv->name, dev_name(&priv->phydev->dev), - priv->phydev->phy_id); + phy_attached_info(priv->phydev); } if (!priv->phydev) { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index c00084d689f3..4e7c9b9b042a 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -393,10 +393,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) /* scan and dump the bus */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = data->bus->phy_map[addr]; + phy = mdiobus_get_phy(data->bus, addr); if (phy) { dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->addr, dev_name(&phy->dev), + phy->mdio.addr, phydev_name(phy), phy->drv ? 
phy->drv->name : "unknown"); } } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index bb1bb72121c0..17a26a429b71 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -113,7 +113,7 @@ struct netcp_intf { #define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS struct netcp_packet { struct sk_buff *skb; - u32 *epib; + __le32 *epib; u32 *psdata; unsigned int psdata_len; struct netcp_intf *netcp; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 37b9b39192ec..c61d66d38634 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -109,69 +109,80 @@ module_param(netcp_debug_level, int, 0); MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)"); /* Helper functions - Get/Set */ -static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc, +static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, struct knav_dma_desc *desc) { - *buff_len = desc->buff_len; - *buff = desc->buff; - *ndesc = desc->next_desc; + *buff_len = le32_to_cpu(desc->buff_len); + *buff = le32_to_cpu(desc->buff); + *ndesc = le32_to_cpu(desc->next_desc); } -static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) +static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) { - *pad0 = desc->pad[0]; - *pad1 = desc->pad[1]; + *pad0 = le32_to_cpu(desc->pad[0]); + *pad1 = le32_to_cpu(desc->pad[1]); + *pad2 = le32_to_cpu(desc->pad[2]); } -static void get_org_pkt_info(u32 *buff, u32 *buff_len, +static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) +{ + u64 pad64; + + pad64 = le32_to_cpu(desc->pad[0]) + + ((u64)le32_to_cpu(desc->pad[1]) << 32); + *padptr = (void *)(uintptr_t)pad64; +} + +static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, struct knav_dma_desc *desc) { - *buff = desc->orig_buff; - *buff_len = desc->orig_len; + *buff = le32_to_cpu(desc->orig_buff); + *buff_len = le32_to_cpu(desc->orig_len); } -static void get_words(u32 *words, int num_words, u32 *desc) +static void get_words(dma_addr_t *words, int num_words, __le32 *desc) { int i; for (i = 0; i < num_words; i++) - words[i] = desc[i]; + words[i] = le32_to_cpu(desc[i]); } -static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc, +static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc, struct knav_dma_desc *desc) { - desc->buff_len = buff_len; - desc->buff = buff; - desc->next_desc = ndesc; + desc->buff_len = cpu_to_le32(buff_len); + desc->buff = cpu_to_le32(buff); + desc->next_desc = cpu_to_le32(ndesc); } static void set_desc_info(u32 desc_info, u32 pkt_info, struct knav_dma_desc *desc) { - desc->desc_info = desc_info; - desc->packet_info = pkt_info; + desc->desc_info = cpu_to_le32(desc_info); + desc->packet_info = cpu_to_le32(pkt_info); } -static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc) +static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) { - desc->pad[0] = pad0; - desc->pad[1] = pad1; + desc->pad[0] = cpu_to_le32(pad0); + desc->pad[1] = cpu_to_le32(pad1); + desc->pad[2] = cpu_to_le32(pad1); } -static void set_org_pkt_info(u32 buff, u32 buff_len, +static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, struct knav_dma_desc *desc) { - desc->orig_buff = buff; - desc->orig_len = buff_len; + desc->orig_buff = cpu_to_le32(buff); + desc->orig_len = cpu_to_le32(buff_len); } -static void set_words(u32 *words, int num_words, u32 *desc) +static void 
set_words(u32 *words, int num_words, __le32 *desc) { int i; for (i = 0; i < num_words; i++) - desc[i] = words[i]; + desc[i] = cpu_to_le32(words[i]); } /* Read the e-fuse value as 32 bit values to be endian independent */ @@ -570,6 +581,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, dma_addr_t dma_desc, dma_buf; unsigned int buf_len, dma_sz = sizeof(*ndesc); void *buf_ptr; + u32 pad[2]; u32 tmp; get_words(&dma_desc, 1, &desc->next_desc); @@ -581,13 +593,15 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, break; } get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); - get_pad_info((u32 *)&buf_ptr, &tmp, ndesc); + get_pad_ptr(&buf_ptr, ndesc); dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); __free_page(buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); } - get_pad_info((u32 *)&buf_ptr, &buf_len, desc); + get_pad_info(&pad[0], &pad[1], &buf_len, desc); + buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); + if (buf_ptr) netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); knav_pool_desc_put(netcp->rx_pool, desc); @@ -625,8 +639,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) dma_addr_t dma_desc, dma_buff; struct netcp_packet p_info; struct sk_buff *skb; + u32 pad[2]; void *org_buf_ptr; - u32 tmp; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) @@ -639,7 +653,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); - get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); + get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); + org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); if (unlikely(!org_buf_ptr)) { dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); @@ -664,6 +679,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) /* Fill in the page fragment list */ while (dma_desc) { struct page *page; + void *ptr; ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) { @@ -672,14 +688,15 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) } get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); - get_pad_info((u32 *)&page, &tmp, ndesc); + get_pad_ptr(&ptr, ndesc); + page = ptr; if (likely(dma_buff && buf_len && page)) { dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, DMA_FROM_DEVICE); } else { - dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n", - (void *)dma_buff, buf_len, page); + dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n", + &dma_buff, buf_len, page); goto free_desc; } @@ -750,7 +767,6 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) unsigned int buf_len, dma_sz; dma_addr_t dma; void *buf_ptr; - u32 tmp; /* Allocate descriptor */ while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) { @@ -761,7 +777,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) } get_org_pkt_info(&dma, &buf_len, desc); - get_pad_info((u32 *)&buf_ptr, &tmp, desc); + get_pad_ptr(&buf_ptr, desc); if (unlikely(!dma)) { dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); @@ -813,7 +829,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) struct page *page; dma_addr_t dma; void *bufptr; - u32 pad[2]; + u32 pad[3]; /* Allocate descriptor */ hwdesc = knav_pool_desc_get(netcp->rx_pool); @@ -830,7 +846,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = 
primary_buf_len; + pad[2] = primary_buf_len; if (unlikely(!bufptr)) { dev_warn_ratelimited(netcp->ndev_dev, @@ -842,7 +858,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (unlikely(dma_mapping_error(netcp->dev, dma))) goto fail; - pad[0] = (u32)bufptr; + pad[0] = lower_32_bits((uintptr_t)bufptr); + pad[1] = upper_32_bits((uintptr_t)bufptr); } else { /* Allocate a secondary receive queue entry */ @@ -853,8 +870,9 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) } buf_len = PAGE_SIZE; dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); - pad[0] = (u32)page; - pad[1] = 0; + pad[0] = lower_32_bits(dma); + pad[1] = upper_32_bits(dma); + pad[2] = 0; } desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; @@ -864,7 +882,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT; set_org_pkt_info(dma, buf_len, hwdesc); - set_pad_info(pad[0], pad[1], hwdesc); + set_pad_info(pad[0], pad[1], pad[2], hwdesc); set_desc_info(desc_info, pkt_info, hwdesc); /* Push to FDQs */ @@ -935,8 +953,8 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, dma_unmap_single(netcp->dev, dma_buf, buf_len, DMA_TO_DEVICE); else - dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n", - (void *)dma_buf, buf_len); + dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n", + &dma_buf, buf_len); knav_pool_desc_put(netcp->tx_pool, ndesc); ndesc = NULL; @@ -953,11 +971,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { struct knav_dma_desc *desc; + void *ptr; struct sk_buff *skb; unsigned int dma_sz; dma_addr_t dma; int pkts = 0; - u32 tmp; while (budget--) { dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); @@ -970,7 +988,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, continue; } - get_pad_info((u32 *)&skb, &tmp, desc); + get_pad_ptr(&ptr, desc); + skb = ptr; netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); @@ -1059,6 +1078,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) u32 page_offset = frag->page_offset; u32 buf_len = skb_frag_size(frag); dma_addr_t desc_dma; + u32 desc_dma_32; u32 pkt_info; dma_addr = dma_map_page(dev, page, page_offset, buf_len, @@ -1075,13 +1095,13 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) goto free_descs; } - desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, - (void *)ndesc); + desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc); pkt_info = (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << KNAV_DMA_DESC_RETQ_SHIFT; set_pkt_info(dma_addr, buf_len, 0, ndesc); - set_words(&desc_dma, 1, &pdesc->next_desc); + desc_dma_32 = (u32)desc_dma; + set_words(&desc_dma_32, 1, &pdesc->next_desc); pkt_len += buf_len; if (pdesc != desc) knav_pool_desc_map(netcp->tx_pool, pdesc, @@ -1129,8 +1149,8 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, p_info.ts_context = NULL; p_info.txtstamp_complete = NULL; p_info.epib = desc->epib; - p_info.psdata = desc->psdata; - memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32)); + p_info.psdata = (u32 __force *)desc->psdata; + memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32)); /* Find out where to inject the packet for transmission */ list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) { @@ -1154,11 +1174,12 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, /* update 
descriptor */ if (p_info.psdata_len) { - u32 *psdata = p_info.psdata; + /* psdata points to both native-endian and device-endian data */ + __le32 *psdata = (void __force *)p_info.psdata; memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, p_info.psdata_len); - set_words(psdata, p_info.psdata_len, psdata); + set_words(p_info.psdata, p_info.psdata_len, psdata); tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } @@ -1173,11 +1194,14 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, } set_words(&tmp, 1, &desc->packet_info); - set_words((u32 *)&skb, 1, &desc->pad[0]); + tmp = lower_32_bits((uintptr_t)&skb); + set_words(&tmp, 1, &desc->pad[0]); + tmp = upper_32_bits((uintptr_t)&skb); + set_words(&tmp, 1, &desc->pad[1]); if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { tmp = tx_pipe->switch_to_port; - set_words((u32 *)&tmp, 1, &desc->tag_info); + set_words(&tmp, 1, &desc->tag_info); } /* submit packet descriptor */ @@ -1990,7 +2014,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, /* NAPI register */ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); - netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); + netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); /* Register the network device */ ndev->dev_id = 0; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 4e70e7586a09..d543298d6750 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2178,7 +2178,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) return -ENODEV; } dev_dbg(priv->dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } @@ -2681,7 +2681,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", - dev_name(&slave->phy->dev)); + phydev_name(slave->phy)); phy_start(slave->phy); phy_read_status(slave->phy); } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 6f0a4495c7f3..298e059d0498 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1349,8 +1349,7 @@ static int tile_net_open_inner(struct net_device *dev) */ static void tile_net_open_retry(struct work_struct *w) { - struct delayed_work *dw = - container_of(w, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(w); struct tile_net_priv *priv = container_of(dw, struct tile_net_priv, retry_work); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 45ac38d29ed8..54874783476a 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -608,40 +608,25 @@ static void tc_handle_link_change(struct net_device *dev) static int tc_mii_probe(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; + struct phy_device *phydev; u32 dropmask; - /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (lp->mii_bus->phy_map[phy_addr]) { - if (phydev) { - printk(KERN_ERR "%s: multiple PHYs found\n", - dev->name); - return -EINVAL; - } - phydev = lp->mii_bus->phy_map[phy_addr]; - break; - } - } - + phydev = phy_find_first(lp->mii_bus); if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } 
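/*
 * The phy_find_first() call above and the phy_connect()/phy_attached_info()
 * lines that follow are the same conversion applied throughout this series:
 * let phylib locate the PHY, connect it by phydev_name(), and let the core
 * print the standard probe banner. A minimal sketch of the probe path a
 * converted driver ends up with (identifiers are reused from the surrounding
 * tc35815 code; nothing here is additional driver logic):
 *
 *	phydev = phy_find_first(lp->mii_bus);
 *	if (!phydev)
 *		return -ENODEV;
 *
 *	phydev = phy_connect(dev, phydev_name(phydev),
 *			     &tc_handle_link_change,
 *			     PHY_INTERFACE_MODE_MII);
 *	if (IS_ERR(phydev))
 *		return PTR_ERR(phydev);
 *
 *	phy_attached_info(phydev);
 */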
/* attach the mac to the phy */ - phydev = phy_connect(dev, dev_name(&phydev->dev), + phydev = phy_connect(dev, phydev_name(phydev), &tc_handle_link_change, lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, id=%x)\n", - dev->name, phydev->drv->name, dev_name(&phydev->dev), - phydev->phy_id); + + phy_attached_info(phydev); /* mask with MAC supported features */ phydev->supported &= PHY_BASIC_FEATURES; @@ -669,7 +654,6 @@ static int tc_mii_init(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int err; - int i; lp->mii_bus = mdiobus_alloc(); if (lp->mii_bus == NULL) { @@ -684,18 +668,9 @@ static int tc_mii_init(struct net_device *dev) (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); lp->mii_bus->priv = dev; lp->mii_bus->parent = &lp->pci_dev->dev; - lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!lp->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mii_bus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - lp->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(lp->mii_bus); if (err) - goto err_out_free_mdio_irq; + goto err_out_free_mii_bus; err = tc_mii_probe(dev); if (err) goto err_out_unregister_bus; @@ -703,8 +678,6 @@ static int tc_mii_init(struct net_device *dev) err_out_unregister_bus: mdiobus_unregister(lp->mii_bus); -err_out_free_mdio_irq: - kfree(lp->mii_bus->irq); err_out_free_mii_bus: mdiobus_free(lp->mii_bus); err_out: @@ -882,7 +855,6 @@ static void tc35815_remove_one(struct pci_dev *pdev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); free_netdev(dev); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 522abe2ff25a..902457e43628 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -337,7 +337,6 @@ struct temac_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c index 415de1eaf641..7714aff78b7d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -92,7 +92,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) bus->read = temac_mdio_read; bus->write = temac_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; @@ -114,7 +113,6 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) void temac_mdio_teardown(struct temac_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 7cb9abac95c8..9ead4e269409 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -385,7 +385,6 @@ struct axidma_bd { * @phy_dev: Pointer to PHY device structure attached to the axienet_local * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure - * @mdio_irqs: IRQs table for MDIO bus required in 
mii_bus structure * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space * @dma_err_tasklet: Tasklet structure to process Axi DMA errors @@ -426,7 +425,6 @@ struct axienet_local { /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ - int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ /* IO registers, dma functions and IRQs */ void __iomem *regs; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 507bbb0355c2..63307ea97846 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -212,7 +212,6 @@ issue: bus->read = axienet_mdio_read; bus->write = axienet_mdio_write; bus->parent = lp->dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; ret = of_mdiobus_register(bus, np1); @@ -232,7 +231,6 @@ issue: void axienet_mdio_teardown(struct axienet_local *lp) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index cf468c87ce57..e324b3092380 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -114,7 +114,6 @@ * @phy_dev: pointer to the PHY device * @phy_node: pointer to the PHY device node * @mii_bus: pointer to the MII bus - * @mdio_irqs: IRQs table for MDIO bus * @last_link: last link status * @has_mdio: indicates whether MDIO is included in the HW */ @@ -135,7 +134,6 @@ struct net_local { struct device_node *phy_node; struct mii_bus *mii_bus; - int mdio_irqs[PHY_MAX_ADDR]; int last_link; bool has_mdio; @@ -829,7 +827,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) dev_info(dev, "MDIO of the phy is not registered yet\n"); else - put_device(&phydev->dev); + put_device(&phydev->mdio.dev); return 0; } @@ -852,7 +850,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; bus->parent = dev; - bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; @@ -1196,7 +1193,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) /* Un-register the mii_bus, if configured */ if (lp->has_mdio) { mdiobus_unregister(lp->mii_bus); - kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } |
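The efx_mcdi_rpc() kernel-doc added earlier in this patch describes the sleeping MCDI entry point that now routes through the NO_EVB_PORT retry wrapper. A minimal caller sketch, assuming the existing sfc MCDI helpers (MCDI_DECLARE_BUF, MC_CMD_GET_VERSION and its _OUT_LEN macro), which are not part of this patch, and an efx pointer from the calling context:

	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;		/* error from the MC or from the transport */
	if (outlen < MC_CMD_GET_VERSION_OUT_LEN)
		return -EIO;		/* truncated response */
	/* outbuf is valid here; decode it with the MCDI_* accessors */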
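The stmmac hwtstamp change stops hard-coding a 50 MHz PTP reference and instead derives the clock addend from the value actually written to the sub-second increment register. A worked example of the arithmetic in stmmac_hwtstamp_ioctl(), assuming digital rollover (PTP_TCR_TSCTRLSSR set) and a clk_ptp_rate of 50 MHz, purely for illustration:

	sec_inc        = 2 * 10^9 / 50,000,000          = 40   (ns per counter tick)
	freq_div_ratio = 10^9 / sec_inc                 = 25,000,000
	default_addend = (freq_div_ratio << 32) / clk_ptp_rate
	               = (25,000,000 * 2^32) / 50,000,000
	               = 2^31 = 0x80000000

With the old code the 50 MHz figure was baked into the formula; after this change a different clk_ptp_rate adjusts sec_inc and the addend consistently.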
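Most of the MDIO hunks in this section (ax88796, bfin_mac, smsc911x/smsc9420, dwc_eth_qos, cpmac, tc35815, ll_temac, axienet, emaclite) are the same cleanup: struct mii_bus now carries its own PHY_MAX_ADDR-sized irq[] table, preset to PHY_POLL by the MDIO core, so drivers drop their private irq allocations and the matching kfree() calls. A sketch of the registration path they converge on; the example_* callbacks, priv, and pdev are placeholders rather than names from any one driver:

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -ENOMEM;

	priv->mii_bus->name = "example-mdio";
	priv->mii_bus->read = example_mdio_read;	/* placeholder callback */
	priv->mii_bus->write = example_mdio_write;	/* placeholder callback */
	priv->mii_bus->parent = &pdev->dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* bus->irq[] already exists and defaults to PHY_POLL; only override
	 * entries that really have an interrupt line, e.g.
	 *	priv->mii_bus->irq[phy_addr] = platform_irq;
	 */

	err = mdiobus_register(priv->mii_bus);
	if (err) {
		mdiobus_free(priv->mii_bus);
		return err;
	}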